// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/gfs2_ondisk.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>
#include <trace/events/writeback.h>
#include <linux/sched/signal.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "trans.h"
#include "rgrp.h"
#include "super.h"
#include "util.h"
#include "glops.h"
#include "aops.h"

void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
			    unsigned int from, unsigned int len)
{
	struct buffer_head *head = page_buffers(page);
	unsigned int bsize = head->b_size;
	struct buffer_head *bh;
	unsigned int to = from + len;
	unsigned int start, end;
	for (bh = head, start = 0; bh != head || !start;
	     bh = bh->b_this_page, start = end) {
		end = start + bsize;
		if (end <= from)
			continue;
		if (start >= to)
			break;
		set_buffer_uptodate(bh);
		gfs2_trans_add_data(ip->i_gl, bh);
	}
}

/**
 * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add block to the file
 *
 * Returns: errno
 */
static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
				  struct buffer_head *bh_result, int create)
{
	int error;
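
	/*
	 * Writeback must never allocate blocks, so call gfs2_block_map()
	 * with create == 0 and treat unmapped blocks as an error below.
	 */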
	error = gfs2_block_map(inode, lblock, bh_result, 0);
	if (error)
		return error;
	if (!buffer_mapped(bh_result))
		return -ENODATA;
	return 0;
}

/**
 * gfs2_writepage - Write page for writeback mappings
 * @page: The page
 * @wbc: The writeback control
 *
 * Returns: errno
 */
static int gfs2_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct iomap_writepage_ctx wpc = { };

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
		goto out;
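
	/*
	 * A non-NULL current->journal_info means we are already inside a
	 * transaction; recursing into writeback here is not safe, so just
	 * redirty the page and let regular writeback pick it up later.
	 */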
	if (current->journal_info)
		goto redirty;
	return iomap_writepage(page, wbc, &wpc, &gfs2_writeback_ops);

redirty:
	redirty_page_for_writepage(wbc, page);
out:
	unlock_page(page);
	return 0;
}

/**
 * gfs2_write_jdata_page - gfs2 jdata-specific version of block_write_full_page
 * @page: The page to write
 * @wbc: The writeback control
 *
 * This is the same as calling block_write_full_page, but it also
 * writes pages outside of i_size
 */
static int gfs2_write_jdata_page(struct page *page,
				 struct writeback_control *wbc)
{
	struct inode * const inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = i_size >> PAGE_SHIFT;
	unsigned offset;

	/*
	 * The page straddles i_size. It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped. "A file is mapped
	 * in multiples of the page size. For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	offset = i_size & (PAGE_SIZE - 1);
	if (page->index == end_index && offset)
		zero_user_segment(page, offset, PAGE_SIZE);

	return __block_write_full_page(inode, page, gfs2_get_block_noalloc, wbc,
				       end_buffer_async_write);
}

/**
 * __gfs2_jdata_writepage - The core of jdata writepage
 * @page: The page to write
 * @wbc: The writeback control
 *
 * This is shared between writepage and writepages and implements the
 * core of the writepage operation. If a transaction is required then
 * PageChecked will have been set and the transaction will have
 * already been started before this is called.
 */
static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
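
	/*
	 * PageChecked is set by jdata_set_page_dirty() when the page was
	 * dirtied inside a transaction; in that case the page's buffers
	 * still need to be added to the running transaction here.
	 */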
	if (PageChecked(page)) {
		ClearPageChecked(page);
		if (!page_has_buffers(page)) {
			create_empty_buffers(page, inode->i_sb->s_blocksize,
					     BIT(BH_Dirty)|BIT(BH_Uptodate));
		}
		gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize);
	}
	return gfs2_write_jdata_page(page, wbc);
}

/**
 * gfs2_jdata_writepage - Write complete page
 * @page: Page to write
 * @wbc: The writeback control
 *
 * Returns: errno
 */
static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
		goto out;
	if (PageChecked(page) || current->journal_info)
		goto out_ignore;
	return __gfs2_jdata_writepage(page, wbc);

out_ignore:
	redirty_page_for_writepage(wbc, page);
out:
	unlock_page(page);
	return 0;
}

/**
 * gfs2_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: Write-back control
 *
 * Used for both ordered and writeback modes.
 */
static int gfs2_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct iomap_writepage_ctx wpc = { };
	int ret;

	/*
	 * Even if we didn't write any pages here, we might still be holding
	 * dirty pages in the ail. We forcibly flush the ail because we don't
	 * want balance_dirty_pages() to loop indefinitely trying to write out
	 * pages held in the ail that it can't find.
	 */
	ret = iomap_writepages(mapping, wbc, &wpc, &gfs2_writeback_ops);
	if (ret == 0)
		set_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags);
	return ret;
}

/**
 * gfs2_write_jdata_pagevec - Write back a pagevec's worth of pages
 * @mapping: The mapping
 * @wbc: The writeback control
 * @pvec: The vector of pages
 * @nr_pages: The number of pages to write
 * @done_index: Page index
 *
 * Returns: non-zero if loop should terminate, zero otherwise
 */
static int gfs2_write_jdata_pagevec(struct address_space *mapping,
				    struct writeback_control *wbc,
				    struct pagevec *pvec,
				    int nr_pages,
				    pgoff_t *done_index)
{
	struct inode *inode = mapping->host;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	unsigned nrblocks = nr_pages * (PAGE_SIZE >> inode->i_blkbits);
	int i;
	int ret;
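
	/*
	 * Reserve enough journal space up front to cover every block in the
	 * pagevec: the transaction must be started before any page locks
	 * are taken (see gfs2_write_cache_jdata()).
	 */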
	ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
	if (ret < 0)
		return ret;

	for (i = 0; i < nr_pages; i++) {
		struct page *page = pvec->pages[i];

		*done_index = page->index;

		lock_page(page);

		if (unlikely(page->mapping != mapping)) {
continue_unlock:
			unlock_page(page);
			continue;
		}

		if (!PageDirty(page)) {
			/* someone wrote it for us */
			goto continue_unlock;
		}

		if (PageWriteback(page)) {
			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);
			else
				goto continue_unlock;
		}

		BUG_ON(PageWriteback(page));
		if (!clear_page_dirty_for_io(page))
			goto continue_unlock;

		trace_wbc_writepage(wbc, inode_to_bdi(inode));

		ret = __gfs2_jdata_writepage(page, wbc);
		if (unlikely(ret)) {
			if (ret == AOP_WRITEPAGE_ACTIVATE) {
				unlock_page(page);
				ret = 0;
			} else {
				/*
				 * done_index is set past this page,
				 * so media errors will not choke
				 * background writeout for the entire
				 * file. This has consequences for
				 * range_cyclic semantics (ie. it may
				 * not be suitable for data integrity
				 * writeout).
				 */
				*done_index = page->index + 1;
				ret = 1;
				break;
			}
		}

		/*
		 * We stop writing back only if we are not doing
		 * integrity sync. In case of integrity sync we have to
		 * keep going until we have written all the pages
		 * we tagged for writeback prior to entering this loop.
		 */
		if (--wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE) {
			ret = 1;
			break;
		}
	}
	gfs2_trans_end(sdp);
	return ret;
}

/**
 * gfs2_write_cache_jdata - Like write_cache_pages but different
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * The reason that we use our own function here is that we need to
 * start transactions before we grab page locks. This allows us
 * to get the ordering right.
 */
static int gfs2_write_cache_jdata(struct address_space *mapping,
				  struct writeback_control *wbc)
{
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t writeback_index;
	pgoff_t index;
	pgoff_t end;
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	xa_mark_t tag;

	pagevec_init(&pvec);
	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
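
	/*
	 * For data-integrity writeback, re-tag the dirty pages as TOWRITE
	 * before starting, so that pages dirtied while the sweep is in
	 * progress cannot make it livelock.
	 */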
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;

retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
				tag);
		if (nr_pages == 0)
			break;

		ret = gfs2_write_jdata_pagevec(mapping, wbc, &pvec, nr_pages, &done_index);
		if (ret)
			done = 1;
		if (ret > 0)
			ret = 0;
		pagevec_release(&pvec);
		cond_resched();
	}

	if (!cycled && !done) {
		/*
		 * range_cyclic:
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	return ret;
}

/**
 * gfs2_jdata_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * Returns: errno
 */
static int gfs2_jdata_writepages(struct address_space *mapping,
				 struct writeback_control *wbc)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	int ret;

	ret = gfs2_write_cache_jdata(mapping, wbc);
	if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) {
		gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_JDATA_WPAGES);
		ret = gfs2_write_cache_jdata(mapping, wbc);
	}
	return ret;
}

/**
 * stuffed_readpage - Fill in a Linux page with stuffed file data
 * @ip: the inode
 * @page: the page
 *
 * Returns: errno
 */
static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
{
	struct buffer_head *dibh;
	u64 dsize = i_size_read(&ip->i_inode);
	void *kaddr;
	int error;

	/*
	 * Due to the order of unstuffing files and ->fault(), we can be
	 * asked for a zero page in the case of a stuffed file being extended,
	 * so we need to supply one here. It doesn't happen often.
	 */
	if (unlikely(page->index)) {
		zero_user(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		return 0;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;
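
	/*
	 * A stuffed file keeps its data in the dinode block itself, right
	 * after the on-disk inode header: copy it into the page and zero
	 * out the remainder.
	 */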
	kaddr = kmap_atomic(page);
	if (dsize > gfs2_max_stuffed_size(ip))
		dsize = gfs2_max_stuffed_size(ip);
	memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
	memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
	kunmap_atomic(kaddr);
	flush_dcache_page(page);
	brelse(dibh);
	SetPageUptodate(page);

	return 0;
}

static int __gfs2_readpage(void *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	int error;
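
	/*
	 * Three cases: ordinary (non-jdata) pages go through iomap, stuffed
	 * inodes are copied straight out of the dinode, and jdata pages with
	 * buffer heads fall back to the block_map based mpage path.
	 */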
	if (!gfs2_is_jdata(ip) ||
	    (i_blocksize(inode) == PAGE_SIZE && !page_has_buffers(page))) {
		error = iomap_readpage(page, &gfs2_iomap_ops);
	} else if (gfs2_is_stuffed(ip)) {
		error = stuffed_readpage(ip, page);
		unlock_page(page);
	} else {
		error = mpage_readpage(page, gfs2_block_map);
	}

	if (unlikely(gfs2_withdrawn(sdp)))
		return -EIO;

	return error;
}

/**
 * gfs2_readpage - read a page of a file
 * @file: The file to read
 * @page: The page of the file
 *
 * Returns: errno
 */
static int gfs2_readpage(struct file *file, struct page *page)
{
	return __gfs2_readpage(file, page);
}

/**
 * gfs2_internal_read - read an internal file
 * @ip: The gfs2 inode
 * @buf: The buffer to fill
 * @pos: The file position
 * @size: The amount to read
 *
 * Returns: The amount of data actually copied or the error
 */
int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
		       unsigned size)
{
	struct address_space *mapping = ip->i_inode.i_mapping;
	unsigned long index = *pos >> PAGE_SHIFT;
	unsigned offset = *pos & (PAGE_SIZE - 1);
	unsigned copied = 0;
	unsigned amt;
	struct page *page;
	void *p;
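
	/*
	 * Pull the data through the page cache one page at a time, copying
	 * at most a page's worth (minus the initial offset) per iteration.
	 */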
	do {
		amt = size - copied;
		if (offset + size > PAGE_SIZE)
			amt = PAGE_SIZE - offset;
		page = read_cache_page(mapping, index, __gfs2_readpage, NULL);
		if (IS_ERR(page))
			return PTR_ERR(page);
		p = kmap_atomic(page);
		memcpy(buf + copied, p + offset, amt);
		kunmap_atomic(p);
		put_page(page);
		copied += amt;
		index++;
		offset = 0;
	} while (copied < size);
	*pos += copied;

	return size;
}

/**
 * gfs2_readahead - Read a bunch of pages at once
 * @rac: Read-ahead control structure
 *
 * Some notes:
 * 1. This is only for readahead, so we can simply ignore any things
 *    which are slightly inconvenient (such as locking conflicts between
 *    the page lock and the glock) and return having done no I/O. It's
 *    obviously not something we'd want to do on too regular a basis.
 *    Any I/O we ignore at this time will be done via readpage later.
 * 2. We don't handle stuffed files here; we let readpage do the honours.
 * 3. mpage_readahead() does most of the heavy lifting in the common case.
 * 4. gfs2_block_map() is relied upon to set BH_Boundary in the right places.
 */
static void gfs2_readahead(struct readahead_control *rac)
{
	struct inode *inode = rac->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);

	if (gfs2_is_stuffed(ip))
		;
	else if (gfs2_is_jdata(ip))
		mpage_readahead(rac, gfs2_block_map);
	else
		iomap_readahead(rac, &gfs2_iomap_ops);
}

/**
 * adjust_fs_space - Adjusts the free space available due to gfs2_grow
 * @inode: the rindex inode
 */
void adjust_fs_space(struct inode *inode)
{
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct buffer_head *m_bh;
	u64 fs_total, new_free;

	if (gfs2_trans_begin(sdp, 2 * RES_STATFS, 0) != 0)
		return;

	/* Total up the file system space, according to the latest rindex. */
	fs_total = gfs2_ri_total(sdp);
	if (gfs2_meta_inode_buffer(m_ip, &m_bh) != 0)
		goto out;

	spin_lock(&sdp->sd_statfs_spin);
	gfs2_statfs_change_in(m_sc, m_bh->b_data +
			      sizeof(struct gfs2_dinode));
	if (fs_total > (m_sc->sc_total + l_sc->sc_total))
		new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
	else
		new_free = 0;
	spin_unlock(&sdp->sd_statfs_spin);
	fs_warn(sdp, "File system extended by %llu blocks.\n",
		(unsigned long long)new_free);
	gfs2_statfs_change(sdp, new_free, new_free, 0);

	update_statfs(sdp, m_bh);
	brelse(m_bh);
out:
	sdp->sd_rindex_uptodate = 0;
	gfs2_trans_end(sdp);
}

/**
 * jdata_set_page_dirty - Page dirtying function
 * @page: The page to dirty
 *
 * Returns: 1 if it dirtied the page, or 0 otherwise
 */
static int jdata_set_page_dirty(struct page *page)
{
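	/*
	 * Mark pages that are dirtied while a transaction is running so
	 * that __gfs2_jdata_writepage() knows their buffers still need to
	 * be added to the journal.
	 */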
	if (current->journal_info)
		SetPageChecked(page);
	return __set_page_dirty_buffers(page);
}

/**
 * gfs2_bmap - Block map function
 * @mapping: Address space info
 * @lblock: The block to map
 *
 * Returns: The disk address for the block or 0 on hole or error
 */
static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder i_gh;
	sector_t dblock = 0;
	int error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
	if (error)
		return 0;

	if (!gfs2_is_stuffed(ip))
		dblock = iomap_bmap(mapping, lblock, &gfs2_iomap_ops);

	gfs2_glock_dq_uninit(&i_gh);

	return dblock;
}
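
/*
 * gfs2_discard - Detach a buffer from the journal before its page range is
 * invalidated, clearing its dirty/mapped state so it can be thrown away
 * safely.
 */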
static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	lock_buffer(bh);
	gfs2_log_lock(sdp);
	clear_buffer_dirty(bh);
	bd = bh->b_private;
	if (bd) {
		if (!list_empty(&bd->bd_list) && !buffer_pinned(bh))
			list_del_init(&bd->bd_list);
		else {
			spin_lock(&sdp->sd_ail_lock);
			gfs2_remove_from_journal(bh, REMOVE_JDATA);
			spin_unlock(&sdp->sd_ail_lock);
		}
	}
	bh->b_bdev = NULL;
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	gfs2_log_unlock(sdp);
	unlock_buffer(bh);
}

static void gfs2_invalidatepage(struct page *page, unsigned int offset,
				unsigned int length)
{
	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
	unsigned int stop = offset + length;
	int partial_page = (offset || length < PAGE_SIZE);
	struct buffer_head *bh, *head;
	unsigned long pos = 0;

	BUG_ON(!PageLocked(page));
	if (!partial_page)
		ClearPageChecked(page);
	if (!page_has_buffers(page))
		goto out;

	bh = head = page_buffers(page);
	do {
		if (pos + bh->b_size > stop)
			return;

		if (offset <= pos)
			gfs2_discard(sdp, bh);
		pos += bh->b_size;
		bh = bh->b_this_page;
	} while (bh != head);
out:
	if (!partial_page)
		try_to_release_page(page, 0);
}

/**
 * gfs2_releasepage - free the metadata associated with a page
 * @page: the page that's being released
 * @gfp_mask: passed from Linux VFS, ignored by us
 *
 * Calls try_to_free_buffers() to free the buffers and put the page if the
 * buffers can be released.
 *
 * Returns: 1 if the page was put or else 0
 */
int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
{
	struct address_space *mapping = page->mapping;
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct buffer_head *bh, *head;
	struct gfs2_bufdata *bd;

	if (!page_has_buffers(page))
		return 0;

	/*
	 * From xfs_vm_releasepage: mm accommodates an old ext3 case where
	 * clean pages might not have had the dirty bit cleared. Thus, it can
	 * send actual dirty pages to ->releasepage() via shrink_active_list().
	 *
	 * As a workaround, we skip pages that contain dirty buffers below.
	 * Once ->releasepage isn't called on dirty pages anymore, we can warn
	 * on dirty buffers like we used to here again.
	 */

	gfs2_log_lock(sdp);
	head = bh = page_buffers(page);
	do {
		if (atomic_read(&bh->b_count))
			goto cannot_release;
		bd = bh->b_private;
		if (bd && bd->bd_tr)
			goto cannot_release;
		if (buffer_dirty(bh) || WARN_ON(buffer_pinned(bh)))
			goto cannot_release;
		bh = bh->b_this_page;
	} while (bh != head);
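
	/*
	 * Every buffer on the page can be released: detach the bufdata from
	 * each buffer head and free it, unless it is still queued on a list
	 * (for example as a pending revoke).
	 */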
	head = bh = page_buffers(page);
	do {
		bd = bh->b_private;
		if (bd) {
			gfs2_assert_warn(sdp, bd->bd_bh == bh);
			bd->bd_bh = NULL;
			bh->b_private = NULL;
			/*
			 * The bd may still be queued as a revoke, in which
			 * case we must not dequeue nor free it.
			 */
			if (!bd->bd_blkno && !list_empty(&bd->bd_list))
				list_del_init(&bd->bd_list);
			if (list_empty(&bd->bd_list))
				kmem_cache_free(gfs2_bufdata_cachep, bd);
		}

		bh = bh->b_this_page;
	} while (bh != head);
	gfs2_log_unlock(sdp);

	return try_to_free_buffers(page);

cannot_release:
	gfs2_log_unlock(sdp);
	return 0;
}

static const struct address_space_operations gfs2_aops = {
	.writepage = gfs2_writepage,
	.writepages = gfs2_writepages,
	.readpage = gfs2_readpage,
	.readahead = gfs2_readahead,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = iomap_releasepage,
	.invalidatepage = iomap_invalidatepage,
	.bmap = gfs2_bmap,
	.direct_IO = noop_direct_IO,
	.migratepage = iomap_migrate_page,
	.is_partially_uptodate = iomap_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};
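
/*
 * Journaled-data inodes get their own address space operations: page
 * dirtying and writeback must go through the journal via buffer heads,
 * so the iomap based helpers used by gfs2_aops cannot be used here.
 */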
static const struct address_space_operations gfs2_jdata_aops = {
	.writepage = gfs2_jdata_writepage,
	.writepages = gfs2_jdata_writepages,
	.readpage = gfs2_readpage,
	.readahead = gfs2_readahead,
	.set_page_dirty = jdata_set_page_dirty,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

void gfs2_set_aops(struct inode *inode)
{
	if (gfs2_is_jdata(GFS2_I(inode)))
		inode->i_mapping->a_ops = &gfs2_jdata_aops;
	else
		inode->i_mapping->a_ops = &gfs2_aops;
}