/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_reflink.h"
#include <linux/gfp.h>
#include <linux/mpage.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
/*
 * structure owned by writepages passed to individual writepage calls
 */
struct xfs_writepage_ctx {
	struct xfs_bmbt_irec	imap;
	bool			imap_valid;
	unsigned int		io_type;
	struct xfs_ioend	*ioend;
	sector_t		last_block;
};
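/*
 * Note: a single xfs_writepage_ctx lives on the stack for an entire
 * ->writepages() pass and is threaded through each per-page call, so the
 * cached imap/ioend state carries over from one page to the next (see
 * xfs_vm_writepages() passing &wpc to write_cache_pages() below).
 */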
void
xfs_count_page_state(
	struct page		*page,
	int			*delalloc,
	int			*unwritten)
{
	struct buffer_head	*bh, *head;

	*delalloc = *unwritten = 0;

	bh = head = page_buffers(page);
	do {
		if (buffer_unwritten(bh))
			(*unwritten) = 1;
		else if (buffer_delay(bh))
			(*delalloc) = 1;
	} while ((bh = bh->b_this_page) != head);
}
struct block_device *
xfs_find_bdev_for_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;

	if (XFS_IS_REALTIME_INODE(ip))
		return mp->m_rtdev_targp->bt_bdev;
	else
		return mp->m_ddev_targp->bt_bdev;
}
/*
 * We're now finished for good with this page.  Update the page state via the
 * associated buffer_heads, paying attention to the start and end offsets that
 * we need to process on the page.
 *
 * Landmine Warning: bh->b_end_io() will call end_page_writeback() on the last
 * buffer in the IO. Once it does this, it is unsafe to access the bufferhead or
 * the page at all, as we may be racing with memory reclaim and it can free both
 * the bufferhead chain and the page as it will see the page as clean and
 * unused.
 */
static void
xfs_finish_page_writeback(
	struct inode		*inode,
	struct bio_vec		*bvec,
	int			error)
{
	unsigned int		end = bvec->bv_offset + bvec->bv_len - 1;
	struct buffer_head	*head, *bh, *next;
	unsigned int		off = 0;
	unsigned int		bsize;

	ASSERT(bvec->bv_offset < PAGE_SIZE);
	ASSERT((bvec->bv_offset & ((1 << inode->i_blkbits) - 1)) == 0);
	ASSERT(end < PAGE_SIZE);
	ASSERT((bvec->bv_len & ((1 << inode->i_blkbits) - 1)) == 0);

	bh = head = page_buffers(bvec->bv_page);

	bsize = bh->b_size;
	do {
		next = bh->b_this_page;
		if (off < bvec->bv_offset)
			goto next_bh;
		if (off > end)
			break;

		bh->b_end_io(bh, !error);
next_bh:
		off += bsize;
	} while ((bh = next) != head);
}
/*
 * We're now finished for good with this ioend structure.  Update the page
 * state, release holds on bios, and finally free up memory.  Do not use the
 * ioend after this.
 */
STATIC void
xfs_destroy_ioend(
	struct xfs_ioend	*ioend,
	int			error)
{
	struct inode		*inode = ioend->io_inode;
	struct bio		*last = ioend->io_bio;
	struct bio		*bio, *next;

	for (bio = &ioend->io_inline_bio; bio; bio = next) {
		struct bio_vec	*bvec;
		int		i;

		/*
		 * For the last bio, bi_private points to the ioend, so we
		 * need to explicitly end the iteration here.
		 */
		if (bio == last)
			next = NULL;
		else
			next = bio->bi_private;

		/* walk each page on bio, ending page IO on them */
		bio_for_each_segment_all(bvec, bio, i)
			xfs_finish_page_writeback(inode, bvec, error);

		bio_put(bio);
	}
}
/*
 * Fast and loose check if this write could update the on-disk inode size.
 */
static inline bool xfs_ioend_is_append(struct xfs_ioend *ioend)
{
	return ioend->io_offset + ioend->io_size >
		XFS_I(ioend->io_inode)->i_d.di_size;
}
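/*
 * "Fast and loose" because i_d.di_size is sampled here without taking the
 * inode lock; the authoritative size update happens later in
 * __xfs_setfilesize() under XFS_ILOCK_EXCL, which rechecks via xfs_new_eof().
 */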
STATIC int
xfs_setfilesize_trans_alloc(
	struct xfs_ioend	*ioend)
{
	struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
	if (error)
		return error;

	ioend->io_append_trans = tp;

	/*
	 * We may pass freeze protection with a transaction.  So tell lockdep
	 * we released it.
	 */
	__sb_writers_release(ioend->io_inode->i_sb, SB_FREEZE_FS);
	/*
	 * We hand off the transaction to the completion thread now, so
	 * clear the flag here.
	 */
	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
	return 0;
}
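/*
 * The PF_FSTRANS flag and the freeze reference released above are reacquired
 * by xfs_setfilesize_ioend() in the completion worker, so the transaction
 * context is effectively handed from the submission thread to the completion
 * thread.
 */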
/*
 * Update on-disk file size now that data has been written to disk.
 */
STATIC int
__xfs_setfilesize(
	struct xfs_inode	*ip,
	struct xfs_trans	*tp,
	xfs_off_t		offset,
	size_t			size)
{
	xfs_fsize_t		isize;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	isize = xfs_new_eof(ip, offset + size);
	if (!isize) {
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		xfs_trans_cancel(tp);
		return 0;
	}

	trace_xfs_setfilesize(ip, offset, size);

	ip->i_d.di_size = isize;
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	return xfs_trans_commit(tp);
}
int
xfs_setfilesize(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	size_t			size)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
	if (error)
		return error;

	return __xfs_setfilesize(ip, tp, offset, size);
}
STATIC int
xfs_setfilesize_ioend(
	struct xfs_ioend	*ioend,
	int			error)
{
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	struct xfs_trans	*tp = ioend->io_append_trans;

	/*
	 * The transaction may have been allocated in the I/O submission thread,
	 * thus we need to mark ourselves as being in a transaction manually.
	 * Similarly for freeze protection.
	 */
	current_set_flags_nested(&tp->t_pflags, PF_FSTRANS);
	__sb_writers_acquired(VFS_I(ip)->i_sb, SB_FREEZE_FS);

	/* we abort the update if there was an IO error */
	if (error) {
		xfs_trans_cancel(tp);
		return error;
	}

	return __xfs_setfilesize(ip, tp, ioend->io_offset, ioend->io_size);
}
/*
 * IO write completion.
 */
STATIC void
xfs_end_io(
	struct work_struct	*work)
{
	struct xfs_ioend	*ioend =
		container_of(work, struct xfs_ioend, io_work);
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	xfs_off_t		offset = ioend->io_offset;
	size_t			size = ioend->io_size;
	int			error = ioend->io_bio->bi_error;

	/*
	 * Just clean up the in-memory structures if the fs has been shut down.
	 */
	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		error = -EIO;
		goto done;
	}

	/*
	 * Clean up any COW blocks on an I/O error.
	 */
	if (unlikely(error)) {
		switch (ioend->io_type) {
		case XFS_IO_COW:
			xfs_reflink_cancel_cow_range(ip, offset, size, true);
			break;
		}

		goto done;
	}

	/*
	 * Success: commit the COW or unwritten blocks if needed.
	 */
	switch (ioend->io_type) {
	case XFS_IO_COW:
		error = xfs_reflink_end_cow(ip, offset, size);
		break;
	case XFS_IO_UNWRITTEN:
		error = xfs_iomap_write_unwritten(ip, offset, size);
		break;
	default:
		ASSERT(!xfs_ioend_is_append(ioend) || ioend->io_append_trans);
		break;
	}

done:
	if (ioend->io_append_trans)
		error = xfs_setfilesize_ioend(ioend, error);
	xfs_destroy_ioend(ioend, error);
}
STATIC void
xfs_end_bio(
	struct bio		*bio)
{
	struct xfs_ioend	*ioend = bio->bi_private;
	struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;

	if (ioend->io_type == XFS_IO_UNWRITTEN || ioend->io_type == XFS_IO_COW)
		queue_work(mp->m_unwritten_workqueue, &ioend->io_work);
	else if (ioend->io_append_trans)
		queue_work(mp->m_data_workqueue, &ioend->io_work);
	else
		xfs_destroy_ioend(ioend, bio->bi_error);
}
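/*
 * Completion routing summary: unwritten and COW ioends need transactional
 * work and go to m_unwritten_workqueue; ioends that only need an on-disk
 * size update go to m_data_workqueue; anything else can be torn down
 * directly in bio completion context.
 */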
STATIC int
xfs_map_blocks(
	struct inode		*inode,
	loff_t			offset,
	struct xfs_bmbt_irec	*imap,
	int			type)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			count = 1 << inode->i_blkbits;
	xfs_fileoff_t		offset_fsb, end_fsb;
	int			error = 0;
	int			bmapi_flags = XFS_BMAPI_ENTIRE;
	int			nimaps = 1;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	ASSERT(type != XFS_IO_COW);
	if (type == XFS_IO_UNWRITTEN)
		bmapi_flags |= XFS_BMAPI_IGSTATE;

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
	       (ip->i_df.if_flags & XFS_IFEXTENTS));
	ASSERT(offset <= mp->m_super->s_maxbytes);

	if (offset + count > mp->m_super->s_maxbytes)
		count = mp->m_super->s_maxbytes - offset;
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
				imap, &nimaps, bmapi_flags);
	/*
	 * Truncate an overwrite extent if there's a pending CoW
	 * reservation before the end of this extent.  This forces us
	 * to come back to writepage to take care of the CoW.
	 */
	if (nimaps && type == XFS_IO_OVERWRITE)
		xfs_reflink_trim_irec_to_next_cow(ip, offset_fsb, imap);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (error)
		return error;

	if (type == XFS_IO_DELALLOC &&
	    (!nimaps || isnullstartblock(imap->br_startblock))) {
		error = xfs_iomap_write_allocate(ip, XFS_DATA_FORK, offset,
				imap);
		if (!error)
			trace_xfs_map_blocks_alloc(ip, offset, count, type, imap);
		return error;
	}

#ifdef DEBUG
	if (type == XFS_IO_UNWRITTEN) {
		ASSERT(nimaps);
		ASSERT(imap->br_startblock != HOLESTARTBLOCK);
		ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
	}
#endif
	if (nimaps)
		trace_xfs_map_blocks_found(ip, offset, count, type, imap);
	return 0;
}
STATIC bool
xfs_imap_valid(
	struct inode		*inode,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	offset >>= inode->i_blkbits;

	return offset >= imap->br_startoff &&
		offset < imap->br_startoff + imap->br_blockcount;
}
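/*
 * Example: with 4096 byte blocks (i_blkbits == 12), an imap with
 * br_startoff == 8 and br_blockcount == 4 covers file offsets
 * 0x8000..0xbfff; a byte offset of 0x9100 shifts down to block 9, which
 * falls inside [8, 12) and is therefore still covered by the mapping.
 */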
STATIC void
xfs_start_buffer_writeback(
	struct buffer_head	*bh)
{
	ASSERT(buffer_mapped(bh));
	ASSERT(buffer_locked(bh));
	ASSERT(!buffer_delay(bh));
	ASSERT(!buffer_unwritten(bh));

	mark_buffer_async_write(bh);
	set_buffer_uptodate(bh);
	clear_buffer_dirty(bh);
}
STATIC void
xfs_start_page_writeback(
	struct page		*page,
	int			clear_dirty)
{
	ASSERT(PageLocked(page));
	ASSERT(!PageWriteback(page));

	/*
	 * if the page was not fully cleaned, we need to ensure that the higher
	 * layers come back to it correctly.  That means we need to keep the page
	 * dirty, and for WB_SYNC_ALL writeback we need to ensure the
	 * PAGECACHE_TAG_TOWRITE index mark is not removed so another attempt to
	 * write this page in this writeback sweep will be made.
	 */
	if (clear_dirty) {
		clear_page_dirty_for_io(page);
		set_page_writeback(page);
	} else
		set_page_writeback_keepwrite(page);

	unlock_page(page);
}
static inline int xfs_bio_add_buffer(struct bio *bio, struct buffer_head *bh)
{
	return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
}
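/*
 * A short return from bio_add_page() (anything != bh->b_size) means the bio
 * is full; the caller reacts by chaining a fresh bio via xfs_chain_bio()
 * and retrying (see xfs_add_to_ioend()).
 */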
/*
 * Submit the bio for an ioend. We are passed an ioend with a bio attached to
 * it, and we submit that bio. The ioend may be used for multiple bio
 * submissions, so we only want to allocate an append transaction for the ioend
 * once. In the case of multiple bio submission, each bio will take an IO
 * reference to the ioend to ensure that the ioend completion is only done once
 * all bios have been submitted and the ioend is really done.
 *
 * If @status is non-zero, it means that we have a situation where some part of
 * the submission process has failed after we have marked pages for writeback
 * and unlocked them. In this situation, we need to fail the bio and ioend
 * rather than submit it to IO. This typically only happens on a filesystem
 * shutdown.
 */
STATIC int
xfs_submit_ioend(
	struct writeback_control *wbc,
	struct xfs_ioend	*ioend,
	int			status)
{
	/* Convert CoW extents to regular */
	if (!status && ioend->io_type == XFS_IO_COW) {
		status = xfs_reflink_convert_cow(XFS_I(ioend->io_inode),
				ioend->io_offset, ioend->io_size);
	}

	/* Reserve log space if we might write beyond the on-disk inode size. */
	if (!status &&
	    ioend->io_type != XFS_IO_UNWRITTEN &&
	    xfs_ioend_is_append(ioend) &&
	    !ioend->io_append_trans)
		status = xfs_setfilesize_trans_alloc(ioend);

	ioend->io_bio->bi_private = ioend;
	ioend->io_bio->bi_end_io = xfs_end_bio;
	ioend->io_bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);

	/*
	 * If we are failing the IO now, just mark the ioend with an
	 * error and finish it. This will run IO completion immediately
	 * as there is only one reference to the ioend at this point in
	 * time.
	 */
	if (status) {
		ioend->io_bio->bi_error = status;
		bio_endio(ioend->io_bio);
		return status;
	}

	submit_bio(ioend->io_bio);
	return 0;
}
static void
xfs_init_bio_from_bh(
	struct bio		*bio,
	struct buffer_head	*bh)
{
	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;
}
static struct xfs_ioend *
xfs_alloc_ioend(
	struct inode		*inode,
	unsigned int		type,
	xfs_off_t		offset,
	struct buffer_head	*bh)
{
	struct xfs_ioend	*ioend;
	struct bio		*bio;

	bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, xfs_ioend_bioset);
	xfs_init_bio_from_bh(bio, bh);

	ioend = container_of(bio, struct xfs_ioend, io_inline_bio);
	INIT_LIST_HEAD(&ioend->io_list);
	ioend->io_type = type;
	ioend->io_inode = inode;
	ioend->io_size = 0;
	ioend->io_offset = offset;
	INIT_WORK(&ioend->io_work, xfs_end_io);
	ioend->io_append_trans = NULL;
	ioend->io_bio = bio;
	return ioend;
}
/*
 * Allocate a new bio, and chain the old bio to the new one.
 *
 * Note that we have to perform the chaining in this unintuitive order
 * so that the bi_private linkage is set up in the right direction for the
 * traversal in xfs_destroy_ioend().
 */
static void
xfs_chain_bio(
	struct xfs_ioend	*ioend,
	struct writeback_control *wbc,
	struct buffer_head	*bh)
{
	struct bio *new;

	new = bio_alloc(GFP_NOFS, BIO_MAX_PAGES);
	xfs_init_bio_from_bh(new, bh);

	bio_chain(ioend->io_bio, new);
	bio_get(ioend->io_bio);		/* for xfs_destroy_ioend */
	ioend->io_bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);
	submit_bio(ioend->io_bio);
	ioend->io_bio = new;
}
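/*
 * After chaining, bi_private of the just-submitted bio points at the new
 * bio, which is how xfs_destroy_ioend() walks forward from the embedded
 * io_inline_bio to the last bio (whose bi_private instead points back at
 * the ioend, terminating the walk).
 */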
/*
 * Test to see if we've been building up a completion structure for
 * earlier buffers -- if so, we try to append to this ioend if we
 * can, otherwise we finish off any current ioend and start another.
 * Return the ioend we finished off so that the caller can submit it
 * once it has finished processing the dirty page.
 */
STATIC void
xfs_add_to_ioend(
	struct inode		*inode,
	struct buffer_head	*bh,
	xfs_off_t		offset,
	struct xfs_writepage_ctx *wpc,
	struct writeback_control *wbc,
	struct list_head	*iolist)
{
	if (!wpc->ioend || wpc->io_type != wpc->ioend->io_type ||
	    bh->b_blocknr != wpc->last_block + 1 ||
	    offset != wpc->ioend->io_offset + wpc->ioend->io_size) {
		if (wpc->ioend)
			list_add(&wpc->ioend->io_list, iolist);
		wpc->ioend = xfs_alloc_ioend(inode, wpc->io_type, offset, bh);
	}

	/*
	 * If the buffer doesn't fit into the bio we need to allocate a new
	 * one. This shouldn't happen more than once for a given buffer.
	 */
	while (xfs_bio_add_buffer(wpc->ioend->io_bio, bh) != bh->b_size)
		xfs_chain_bio(wpc->ioend, wbc, bh);

	wpc->ioend->io_size += bh->b_size;
	wpc->last_block = bh->b_blocknr;
	xfs_start_buffer_writeback(bh);
}
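/*
 * A buffer is appended to the cached ioend only when all three conditions
 * hold: same IO type, physically contiguous on disk (b_blocknr follows
 * last_block) and logically contiguous in the file (offset follows
 * io_offset + io_size). Any mismatch starts a new ioend and queues the old
 * one on @iolist for later submission.
 */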
STATIC void
xfs_map_buffer(
	struct inode		*inode,
	struct buffer_head	*bh,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	sector_t		bn;
	struct xfs_mount	*m = XFS_I(inode)->i_mount;
	xfs_off_t		iomap_offset = XFS_FSB_TO_B(m, imap->br_startoff);
	xfs_daddr_t		iomap_bn = xfs_fsb_to_db(XFS_I(inode), imap->br_startblock);

	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

	bn = (iomap_bn >> (inode->i_blkbits - BBSHIFT)) +
	      ((offset - iomap_offset) >> inode->i_blkbits);

	ASSERT(bn || XFS_IS_REALTIME_INODE(XFS_I(inode)));

	bh->b_blocknr = bn;
	set_buffer_mapped(bh);
}
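/*
 * Example: with 4096 byte blocks and 512 byte basic blocks
 * (i_blkbits == 12, BBSHIFT == 9), iomap_bn >> 3 converts daddrs into
 * filesystem block units, so an extent starting at daddr 80 maps the byte
 * offset iomap_offset + 0x1000 to b_blocknr 11 (80 >> 3 == 10, plus one
 * block into the extent).
 */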
STATIC void
xfs_map_at_offset(
	struct inode		*inode,
	struct buffer_head	*bh,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

	xfs_map_buffer(inode, bh, imap, offset);
	set_buffer_mapped(bh);
	clear_buffer_delay(bh);
	clear_buffer_unwritten(bh);
}
/*
 * Test if a given page contains at least one buffer of a given @type.
 * If @check_all_buffers is true, then we walk all the buffers in the page to
 * try to find one of the type passed in. If it is not set, then the caller only
 * needs to check the first buffer on the page for a match.
 */
STATIC bool
xfs_check_page_type(
	struct page		*page,
	unsigned int		type,
	bool			check_all_buffers)
{
	struct buffer_head	*bh;
	struct buffer_head	*head;

	if (PageWriteback(page))
		return false;
	if (!page->mapping)
		return false;
	if (!page_has_buffers(page))
		return false;

	bh = head = page_buffers(page);
	do {
		if (buffer_unwritten(bh)) {
			if (type == XFS_IO_UNWRITTEN)
				return true;
		} else if (buffer_delay(bh)) {
			if (type == XFS_IO_DELALLOC)
				return true;
		} else if (buffer_dirty(bh) && buffer_mapped(bh)) {
			if (type == XFS_IO_OVERWRITE)
				return true;
		}

		/* If we are only checking the first buffer, we are done now. */
		if (!check_all_buffers)
			break;
	} while ((bh = bh->b_this_page) != head);

	return false;
}
STATIC void
xfs_vm_invalidatepage(
	struct page		*page,
	unsigned int		offset,
	unsigned int		length)
{
	trace_xfs_invalidatepage(page->mapping->host, page, offset,
				 length);
	block_invalidatepage(page, offset, length);
}
/*
 * If the page has delalloc buffers on it, we need to punch them out before we
 * invalidate the page. If we don't, we leave a stale delalloc mapping on the
 * inode that can trip a BUG() in xfs_get_blocks() later on if a direct IO read
 * is done on that same region - the delalloc extent is returned when none is
 * supposed to be there.
 *
 * We prevent this by truncating away the delalloc regions on the page before
 * invalidating it. Because they are delalloc, we can do this without needing a
 * transaction. Indeed - if we get ENOSPC errors, we have to be able to do this
 * truncation without a transaction as there is no space left for block
 * reservation (typically why we see an ENOSPC in writeback).
 *
 * This is not a performance critical path, so for now just do the punching a
 * buffer head at a time.
 */
STATIC void
xfs_aops_discard_page(
	struct page		*page)
{
	struct inode		*inode = page->mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct buffer_head	*bh, *head;
	loff_t			offset = page_offset(page);

	if (!xfs_check_page_type(page, XFS_IO_DELALLOC, true))
		goto out_invalidate;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		goto out_invalidate;

	xfs_alert(ip->i_mount,
		"page discard on page %p, inode 0x%llx, offset %llu.",
			page, ip->i_ino, offset);

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	bh = head = page_buffers(page);
	do {
		int		error;
		xfs_fileoff_t	start_fsb;

		if (!buffer_delay(bh))
			goto next_buffer;

		start_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
		error = xfs_bmap_punch_delalloc_range(ip, start_fsb, 1);
		if (error) {
			/* something screwed, just bail */
			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
				xfs_alert(ip->i_mount,
			"page discard unable to remove delalloc mapping.");
			}
			break;
		}
next_buffer:
		offset += 1 << inode->i_blkbits;

	} while ((bh = bh->b_this_page) != head);

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
out_invalidate:
	xfs_vm_invalidatepage(page, 0, PAGE_SIZE);
	return;
}
static int
xfs_map_cow(
	struct xfs_writepage_ctx *wpc,
	struct inode		*inode,
	loff_t			offset,
	unsigned int		*new_type)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_bmbt_irec	imap;
	bool			is_cow = false;
	int			error;

	/*
	 * If we already have a valid COW mapping keep using it.
	 */
	if (wpc->io_type == XFS_IO_COW) {
		wpc->imap_valid = xfs_imap_valid(inode, &wpc->imap, offset);
		if (wpc->imap_valid) {
			*new_type = XFS_IO_COW;
			return 0;
		}
	}

	/*
	 * Else we need to check if there is a COW mapping at this offset.
	 */
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	is_cow = xfs_reflink_find_cow_mapping(ip, offset, &imap);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (!is_cow)
		return 0;

	/*
	 * And if the COW mapping has a delayed extent here we need to
	 * allocate real space for it now.
	 */
	if (isnullstartblock(imap.br_startblock)) {
		error = xfs_iomap_write_allocate(ip, XFS_COW_FORK, offset,
				&imap);
		if (error)
			return error;
	}

	wpc->io_type = *new_type = XFS_IO_COW;
	wpc->imap_valid = true;
	wpc->imap = imap;
	return 0;
}
/*
 * We implement an immediate ioend submission policy here to avoid needing to
 * chain multiple ioends and hence nest mempool allocations which can violate
 * forward progress guarantees we need to provide. The current ioend we are
 * adding buffers to is cached on the writepage context, and if the new buffer
 * does not append to the cached ioend it will create a new ioend and cache that
 * instead.
 *
 * If a new ioend is created and cached, the old ioend is returned and queued
 * locally for submission once the entire page is processed or an error has been
 * detected.  While ioends are submitted immediately after they are completed,
 * batching optimisations are provided by higher level block plugging.
 *
 * At the end of a writeback pass, there will be a cached ioend remaining on the
 * writepage context that the caller will need to submit.
 */
static int
xfs_writepage_map(
	struct xfs_writepage_ctx *wpc,
	struct writeback_control *wbc,
	struct inode		*inode,
	struct page		*page,
	loff_t			offset,
	__uint64_t		end_offset)
{
	LIST_HEAD(submit_list);
	struct xfs_ioend	*ioend, *next;
	struct buffer_head	*bh, *head;
	ssize_t			len = 1 << inode->i_blkbits;
	int			error = 0;
	int			count = 0;
	int			uptodate = 1;
	unsigned int		new_type;

	bh = head = page_buffers(page);
	offset = page_offset(page);
	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;

		/*
		 * set_page_dirty dirties all buffers in a page, independent
		 * of their state.  The dirty state however is entirely
		 * meaningless for holes (!mapped && uptodate), so skip
		 * buffers covering holes here.
		 */
		if (!buffer_mapped(bh) && buffer_uptodate(bh)) {
			wpc->imap_valid = false;
			continue;
		}

		if (buffer_unwritten(bh))
			new_type = XFS_IO_UNWRITTEN;
		else if (buffer_delay(bh))
			new_type = XFS_IO_DELALLOC;
		else if (buffer_uptodate(bh))
			new_type = XFS_IO_OVERWRITE;
		else {
			if (PageUptodate(page))
				ASSERT(buffer_mapped(bh));
			/*
			 * This buffer is not uptodate and will not be
			 * written to disk.  Ensure that we will put any
			 * subsequent writeable buffers into a new
			 * ioend.
			 */
			wpc->imap_valid = false;
			continue;
		}

		if (xfs_is_reflink_inode(XFS_I(inode))) {
			error = xfs_map_cow(wpc, inode, offset, &new_type);
			if (error)
				goto out;
		}

		if (wpc->io_type != new_type) {
			wpc->io_type = new_type;
			wpc->imap_valid = false;
		}

		if (wpc->imap_valid)
			wpc->imap_valid = xfs_imap_valid(inode, &wpc->imap,
							 offset);
		if (!wpc->imap_valid) {
			error = xfs_map_blocks(inode, offset, &wpc->imap,
					     wpc->io_type);
			if (error)
				goto out;
			wpc->imap_valid = xfs_imap_valid(inode, &wpc->imap,
							 offset);
		}
		if (wpc->imap_valid) {
			lock_buffer(bh);
			if (wpc->io_type != XFS_IO_OVERWRITE)
				xfs_map_at_offset(inode, bh, &wpc->imap, offset);
			xfs_add_to_ioend(inode, bh, offset, wpc, wbc, &submit_list);
			count++;
		}

	} while (offset += len, ((bh = bh->b_this_page) != head));

	if (uptodate && bh == head)
		SetPageUptodate(page);

	ASSERT(wpc->ioend || list_empty(&submit_list));

out:
	/*
	 * On error, we have to fail the ioend here because we have locked
	 * buffers in the ioend. If we don't do this, we'll deadlock
	 * invalidating the page as that tries to lock the buffers on the page.
	 * Also, because we may have set pages under writeback, we have to make
	 * sure we run IO completion to mark the error state of the IO
	 * appropriately, so we can't cancel the ioend directly here. That means
	 * we have to mark this page as under writeback if we included any
	 * buffers from it in the ioend chain so that completion treats it
	 * correctly.
	 *
	 * If we didn't include the page in the ioend, then on error we can
	 * simply discard and unlock it as there are no other users of the page
	 * or its buffers right now. The caller will still need to trigger
	 * submission of outstanding ioends on the writepage context so they are
	 * treated correctly on error.
	 */
	if (count) {
		xfs_start_page_writeback(page, !error);

		/*
		 * Preserve the original error if there was one, otherwise catch
		 * submission errors here and propagate into subsequent ioend
		 * submissions.
		 */
		list_for_each_entry_safe(ioend, next, &submit_list, io_list) {
			int error2;

			list_del_init(&ioend->io_list);
			error2 = xfs_submit_ioend(wbc, ioend, error);
			if (error2 && !error)
				error = error2;
		}
	} else if (error) {
		xfs_aops_discard_page(page);
		ClearPageUptodate(page);
		unlock_page(page);
	} else {
		/*
		 * We can end up here with no error and nothing to write if we
		 * race with a partial page truncate on a sub-page block sized
		 * filesystem. In that case we need to mark the page clean.
		 */
		xfs_start_page_writeback(page, 1);
		end_page_writeback(page);
	}

	mapping_set_error(page->mapping, error);
	return error;
}
/*
 * Write out a dirty page.
 *
 * For delalloc space on the page we need to allocate space and flush it.
 * For unwritten space on the page we need to start the conversion to
 * regular allocated space.
 * For any other dirty buffer heads on the page we should flush them.
 */
STATIC int
xfs_do_writepage(
	struct page		*page,
	struct writeback_control *wbc,
	void			*data)
{
	struct xfs_writepage_ctx *wpc = data;
	struct inode		*inode = page->mapping->host;
	loff_t			offset;
	__uint64_t		end_offset;
	pgoff_t			end_index;

	trace_xfs_writepage(inode, page, 0, 0);

	ASSERT(page_has_buffers(page));

	/*
	 * Refuse to write the page out if we are called from reclaim context.
	 *
	 * This avoids stack overflows when called from deeply used stacks in
	 * random callers for direct reclaim or memcg reclaim.  We explicitly
	 * allow reclaim from kswapd as the stack usage there is relatively low.
	 *
	 * This should never happen except in the case of a VM regression so
	 * warn about it.
	 */
	if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
			PF_MEMALLOC))
		goto redirty;

	/*
	 * Given that we do not allow direct reclaim to call us, we should
	 * never be called while in a filesystem transaction.
	 */
	if (WARN_ON_ONCE(current->flags & PF_FSTRANS))
		goto redirty;

	/*
	 * Is this page beyond the end of the file?
	 *
	 * The page index is less than the end_index, adjust the end_offset
	 * to the highest offset that this page should represent.
	 * -----------------------------------------------------
	 * |			file mapping	       | <EOF> |
	 * -----------------------------------------------------
	 * | Page ... | Page N-2 | Page N-1 |  Page N  |       |
	 * ^--------------------------------^----------|--------
	 * |     desired writeback range    |      see else    |
	 * ---------------------------------^------------------|
	 */
	offset = i_size_read(inode);
	end_index = offset >> PAGE_SHIFT;
	if (page->index < end_index)
		end_offset = (xfs_off_t)(page->index + 1) << PAGE_SHIFT;
	else {
		/*
		 * Check whether the page to write out is beyond or straddles
		 * i_size or not.
		 * -------------------------------------------------------
		 * |		file mapping		        | <EOF>  |
		 * -------------------------------------------------------
		 * | Page ... | Page N-2 | Page N-1 |  Page N   | Beyond |
		 * ^--------------------------------^-----------|---------
		 * |				    |      Straddles     |
		 * ---------------------------------^-----------|--------|
		 */
		unsigned offset_into_page = offset & (PAGE_SIZE - 1);

		/*
		 * Skip the page if it is fully outside i_size, e.g. due to a
		 * truncate operation that is in progress. We must redirty the
		 * page so that reclaim stops reclaiming it. Otherwise
		 * xfs_vm_releasepage() is called on it and gets confused.
		 *
		 * Note that the end_index is unsigned long, it would overflow
		 * if the given offset is greater than 16TB on 32-bit system
		 * and if we do check the page is fully outside i_size or not
		 * via "if (page->index >= end_index + 1)" as "end_index + 1"
		 * will be evaluated to 0.  Hence this page will be redirtied
		 * and be written out repeatedly which would result in an
		 * infinite loop, the user program that performs this operation
		 * will hang.  Instead, we can verify this situation by checking
		 * if the page to write is totally beyond the i_size or if its
		 * offset is just equal to the EOF.
		 */
		if (page->index > end_index ||
		    (page->index == end_index && offset_into_page == 0))
			goto redirty;

		/*
		 * The page straddles i_size.  It must be zeroed out on each
		 * and every writepage invocation because it may be mmapped.
		 * "A file is mapped in multiples of the page size.  For a file
		 * that is not a multiple of the page size, the remaining
		 * memory is zeroed when mapped, and writes to that region are
		 * not written out to the file."
		 */
		zero_user_segment(page, offset_into_page, PAGE_SIZE);

		/* Adjust the end_offset to the end of file */
		end_offset = offset;
	}

	return xfs_writepage_map(wpc, wbc, inode, page, offset, end_offset);

redirty:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
}
STATIC int
xfs_vm_writepage(
	struct page		*page,
	struct writeback_control *wbc)
{
	struct xfs_writepage_ctx wpc = {
		.io_type = XFS_IO_INVALID,
	};
	int			ret;

	ret = xfs_do_writepage(page, wbc, &wpc);
	if (wpc.ioend)
		ret = xfs_submit_ioend(wbc, wpc.ioend, ret);
	return ret;
}
STATIC int
xfs_vm_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	struct xfs_writepage_ctx wpc = {
		.io_type = XFS_IO_INVALID,
	};
	int			ret;

	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
	if (dax_mapping(mapping))
		return dax_writeback_mapping_range(mapping,
				xfs_find_bdev_for_inode(mapping->host), wbc);

	ret = write_cache_pages(mapping, wbc, xfs_do_writepage, &wpc);
	if (wpc.ioend)
		ret = xfs_submit_ioend(wbc, wpc.ioend, ret);
	return ret;
}
/*
 * Called to move a page into cleanable state - and from there
 * to be released. The page should already be clean. We always
 * have buffer heads in this call.
 *
 * Returns 1 if the page is ok to release, 0 otherwise.
 */
STATIC int
xfs_vm_releasepage(
	struct page		*page,
	gfp_t			gfp_mask)
{
	int			delalloc, unwritten;

	trace_xfs_releasepage(page->mapping->host, page, 0, 0);

	/*
	 * mm accommodates an old ext3 case where clean pages might not have had
	 * the dirty bit cleared. Thus, it can send actual dirty pages to
	 * ->releasepage() via shrink_active_list(). Conversely,
	 * block_invalidatepage() can send pages that are still marked dirty
	 * but otherwise have invalidated buffers.
	 *
	 * We want to release the latter to avoid unnecessary buildup of the
	 * LRU, skip the former and warn if we've left any lingering
	 * delalloc/unwritten buffers on clean pages. Skip pages with delalloc
	 * or unwritten buffers and warn if the page is not dirty. Otherwise
	 * try to release the buffers.
	 */
	xfs_count_page_state(page, &delalloc, &unwritten);

	if (delalloc) {
		WARN_ON_ONCE(!PageDirty(page));
		return 0;
	}
	if (unwritten) {
		WARN_ON_ONCE(!PageDirty(page));
		return 0;
	}

	return try_to_free_buffers(page);
}
/*
 * If this is O_DIRECT or the mpage code calling tell them how large the mapping
 * is, so that we can avoid repeated get_blocks calls.
 *
 * If the mapping spans EOF, then we have to break the mapping up as the mapping
 * for blocks beyond EOF must be marked new so that sub block regions can be
 * correctly zeroed. We can't do this for mappings within EOF unless the mapping
 * was just allocated or is unwritten, otherwise the callers would overwrite
 * existing data with zeros. Hence we have to split the mapping into a range up
 * to and including EOF, and a second mapping for beyond EOF.
 */
static void
xfs_map_trim_size(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset,
	ssize_t			size)
{
	xfs_off_t		mapping_size;

	mapping_size = imap->br_startoff + imap->br_blockcount - iblock;
	mapping_size <<= inode->i_blkbits;

	ASSERT(mapping_size > 0);
	if (mapping_size > size)
		mapping_size = size;
	if (offset < i_size_read(inode) &&
	    offset + mapping_size >= i_size_read(inode)) {
		/* limit mapping to block that spans EOF */
		mapping_size = roundup_64(i_size_read(inode) - offset,
					  1 << inode->i_blkbits);
	}
	if (mapping_size > LONG_MAX)
		mapping_size = LONG_MAX;

	bh_result->b_size = mapping_size;
}
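/*
 * Example: with 4096 byte blocks, i_size == 10000 and a mapping that starts
 * at offset 8192 and extends past EOF, the mapping is trimmed to
 * roundup_64(10000 - 8192, 4096) == 4096 bytes, i.e. just the one block
 * spanning EOF, so that blocks beyond it can be mapped (and marked new)
 * separately.
 */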
static int
xfs_get_blocks(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb, end_fsb;
	int			error = 0;
	int			lockmode = 0;
	struct xfs_bmbt_irec	imap;
	int			nimaps = 1;
	xfs_off_t		offset;
	ssize_t			size;

	BUG_ON(create);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	offset = (xfs_off_t)iblock << inode->i_blkbits;
	ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
	size = bh_result->b_size;

	if (offset >= i_size_read(inode))
		return 0;

	/*
	 * Direct I/O is usually done on preallocated files, so try getting
	 * a block mapping without an exclusive lock first.
	 */
	lockmode = xfs_ilock_data_map_shared(ip);

	ASSERT(offset <= mp->m_super->s_maxbytes);
	if (offset + size > mp->m_super->s_maxbytes)
		size = mp->m_super->s_maxbytes - offset;
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);

	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
				&imap, &nimaps, XFS_BMAPI_ENTIRE);
	if (error)
		goto out_unlock;

	if (nimaps) {
		trace_xfs_get_blocks_found(ip, offset, size,
			ISUNWRITTEN(&imap) ? XFS_IO_UNWRITTEN
					   : XFS_IO_OVERWRITE, &imap);
		xfs_iunlock(ip, lockmode);
	} else {
		trace_xfs_get_blocks_notfound(ip, offset, size);
		goto out_unlock;
	}

	/* trim mapping down to size requested */
	xfs_map_trim_size(inode, iblock, bh_result, &imap, offset, size);

	/*
	 * For unwritten extents do not report a disk address in the buffered
	 * read case (treat as if we're reading into a hole).
	 */
	if (imap.br_startblock != HOLESTARTBLOCK &&
	    imap.br_startblock != DELAYSTARTBLOCK &&
	    !ISUNWRITTEN(&imap))
		xfs_map_buffer(inode, bh_result, &imap, offset);

	/*
	 * If this is a realtime file, data may be on a different device
	 * to that pointed to from the buffer_head b_bdev currently.
	 */
	bh_result->b_bdev = xfs_find_bdev_for_inode(inode);
	return 0;

out_unlock:
	xfs_iunlock(ip, lockmode);
	return error;
}
STATIC ssize_t
xfs_vm_direct_IO(
	struct kiocb		*iocb,
	struct iov_iter		*iter)
{
	/*
	 * We just need the method present so that open/fcntl allow direct I/O.
	 */
	return -EINVAL;
}
STATIC sector_t
xfs_vm_bmap(
	struct address_space	*mapping,
	sector_t		block)
{
	struct inode		*inode = (struct inode *)mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);

	trace_xfs_vm_bmap(XFS_I(inode));

	/*
	 * The swap code (ab-)uses ->bmap to get a block mapping and then
	 * bypasses the file system for actual I/O.  We really can't allow
	 * that on reflink inodes, so we have to skip out here.  And yes,
	 * 0 is the magic code for a bmap error.
	 */
	if (xfs_is_reflink_inode(ip))
		return 0;

	filemap_write_and_wait(mapping);
	return generic_block_bmap(mapping, block, xfs_get_blocks);
}
STATIC int
xfs_vm_readpage(
	struct file		*unused,
	struct page		*page)
{
	trace_xfs_vm_readpage(page->mapping->host, 1);
	return mpage_readpage(page, xfs_get_blocks);
}
STATIC int
xfs_vm_readpages(
	struct file		*unused,
	struct address_space	*mapping,
	struct list_head	*pages,
	unsigned		nr_pages)
{
	trace_xfs_vm_readpages(mapping->host, nr_pages);
	return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
}
/*
 * This is basically a copy of __set_page_dirty_buffers() with one
 * small tweak: buffers beyond EOF do not get marked dirty. If we mark them
 * dirty, we'll never be able to clean them because we don't write buffers
 * beyond EOF, and that means we can't invalidate pages that span EOF
 * that have been marked dirty. Further, the dirty state can leak into
 * the file interior if the file is extended, resulting in all sorts of
 * bad things happening as the state does not match the underlying data.
 *
 * XXX: this really indicates that bufferheads in XFS need to die. Warts like
 * this only exist because of bufferheads and how the generic code manages them.
 */
STATIC int
xfs_vm_set_page_dirty(
	struct page		*page)
{
	struct address_space	*mapping = page->mapping;
	struct inode		*inode = mapping->host;
	loff_t			end_offset;
	loff_t			offset;
	int			newly_dirty;

	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	end_offset = i_size_read(inode);
	offset = page_offset(page);

	spin_lock(&mapping->private_lock);
	if (page_has_buffers(page)) {
		struct buffer_head *head = page_buffers(page);
		struct buffer_head *bh = head;

		do {
			if (offset < end_offset)
				set_buffer_dirty(bh);
			bh = bh->b_this_page;
			offset += 1 << inode->i_blkbits;
		} while (bh != head);
	}
	/*
	 * Lock out page->mem_cgroup migration to keep PageDirty
	 * synchronized with per-memcg dirty page counters.
	 */
	lock_page_memcg(page);
	newly_dirty = !TestSetPageDirty(page);
	spin_unlock(&mapping->private_lock);

	if (newly_dirty) {
		/* sigh - __set_page_dirty() is static, so copy it here, too */
		unsigned long flags;

		spin_lock_irqsave(&mapping->tree_lock, flags);
		if (page->mapping) {	/* Race with truncate? */
			WARN_ON_ONCE(!PageUptodate(page));
			account_page_dirtied(page, mapping);
			radix_tree_tag_set(&mapping->page_tree,
					page_index(page), PAGECACHE_TAG_DIRTY);
		}
		spin_unlock_irqrestore(&mapping->tree_lock, flags);
	}
	unlock_page_memcg(page);
	if (newly_dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	return newly_dirty;
}
const struct address_space_operations xfs_address_space_operations = {
	.readpage		= xfs_vm_readpage,
	.readpages		= xfs_vm_readpages,
	.writepage		= xfs_vm_writepage,
	.writepages		= xfs_vm_writepages,
	.set_page_dirty		= xfs_vm_set_page_dirty,
	.releasepage		= xfs_vm_releasepage,
	.invalidatepage		= xfs_vm_invalidatepage,
	.bmap			= xfs_vm_bmap,
	.direct_IO		= xfs_vm_direct_IO,
	.migratepage		= buffer_migrate_page,
	.is_partially_uptodate	= block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
};