/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_reflink.h"
#include <linux/gfp.h>
#include <linux/mpage.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
/*
 * structure owned by writepages passed to individual writepage calls
 */
struct xfs_writepage_ctx {
	struct xfs_bmbt_irec	imap;
	bool			imap_valid;
	unsigned int		io_type;
	struct xfs_ioend	*ioend;
	sector_t		last_block;
};
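
/*
 * Count the delalloc and unwritten buffers attached to a page.  Used by
 * xfs_vm_releasepage() below to catch stale delalloc/unwritten state on
 * pages that are about to be released.
 */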
void
xfs_count_page_state(
	struct page		*page,
	int			*delalloc,
	int			*unwritten)
{
	struct buffer_head	*bh, *head;

	*delalloc = *unwritten = 0;

	bh = head = page_buffers(page);
	do {
		if (buffer_unwritten(bh))
			(*unwritten) = 1;
		else if (buffer_delay(bh))
			(*delalloc) = 1;
	} while ((bh = bh->b_this_page) != head);
}
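
/*
 * Return the block device that backs this inode's data: the realtime
 * device for realtime inodes, the data device for everything else.
 */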
struct block_device *
xfs_find_bdev_for_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;

	if (XFS_IS_REALTIME_INODE(ip))
		return mp->m_rtdev_targp->bt_bdev;
	else
		return mp->m_ddev_targp->bt_bdev;
}
/*
 * We're now finished for good with this page.  Update the page state via the
 * associated buffer_heads, paying attention to the start and end offsets that
 * we need to process on the page.
 *
 * Note that we open code the action in end_buffer_async_write here so that we
 * only have to iterate over the buffers attached to the page once.  This is not
 * only more efficient, but also ensures that we only call end_page_writeback
 * at the end of the iteration, and thus avoids the pitfall of having the page
 * and buffers potentially freed after every call to end_buffer_async_write.
 */
static void
xfs_finish_page_writeback(
	struct inode		*inode,
	struct bio_vec		*bvec,
	int			error)
{
	struct buffer_head	*head = page_buffers(bvec->bv_page), *bh = head;
	bool			busy = false;
	unsigned int		off = 0;
	unsigned long		flags;

	ASSERT(bvec->bv_offset < PAGE_SIZE);
	ASSERT((bvec->bv_offset & (i_blocksize(inode) - 1)) == 0);
	ASSERT(bvec->bv_offset + bvec->bv_len <= PAGE_SIZE);
	ASSERT((bvec->bv_len & (i_blocksize(inode) - 1)) == 0);

	local_irq_save(flags);
	bit_spin_lock(BH_Uptodate_Lock, &head->b_state);
	do {
		if (off >= bvec->bv_offset &&
		    off < bvec->bv_offset + bvec->bv_len) {
			ASSERT(buffer_async_write(bh));
			ASSERT(bh->b_end_io == NULL);

			if (error) {
				mark_buffer_write_io_error(bh);
				clear_buffer_uptodate(bh);
				SetPageError(bvec->bv_page);
			} else {
				set_buffer_uptodate(bh);
			}
			clear_buffer_async_write(bh);
			unlock_buffer(bh);
		} else if (buffer_async_write(bh)) {
			ASSERT(buffer_locked(bh));
			busy = true;
		}
		off += bh->b_size;
	} while ((bh = bh->b_this_page) != head);
	bit_spin_unlock(BH_Uptodate_Lock, &head->b_state);
	local_irq_restore(flags);

	if (!busy)
		end_page_writeback(bvec->bv_page);
}
/*
 * We're now finished for good with this ioend structure.  Update the page
 * state, release holds on bios, and finally free up memory.  Do not use the
 * ioend after this.
 */
STATIC void
xfs_destroy_ioend(
	struct xfs_ioend	*ioend,
	int			error)
{
	struct inode		*inode = ioend->io_inode;
	struct bio		*bio = &ioend->io_inline_bio;
	struct bio		*last = ioend->io_bio, *next;
	u64			start = bio->bi_iter.bi_sector;
	bool			quiet = bio_flagged(bio, BIO_QUIET);

	for (bio = &ioend->io_inline_bio; bio; bio = next) {
		struct bio_vec	*bvec;
		int		i;

		/*
		 * For the last bio, bi_private points to the ioend, so we
		 * need to explicitly end the iteration here.
		 */
		if (bio == last)
			next = NULL;
		else
			next = bio->bi_private;

		/* walk each page on bio, ending page IO on them */
		bio_for_each_segment_all(bvec, bio, i)
			xfs_finish_page_writeback(inode, bvec, error);

		bio_put(bio);
	}

	if (unlikely(error && !quiet)) {
		xfs_err_ratelimited(XFS_I(inode)->i_mount,
			"writeback error on sector %llu", start);
	}
}
/*
 * Fast and loose check if this write could update the on-disk inode size.
 */
static inline bool xfs_ioend_is_append(struct xfs_ioend *ioend)
{
	return ioend->io_offset + ioend->io_size >
		XFS_I(ioend->io_inode)->i_d.di_size;
}
STATIC int
xfs_setfilesize_trans_alloc(
	struct xfs_ioend	*ioend)
{
	struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
	if (error)
		return error;

	ioend->io_append_trans = tp;

	/*
	 * We may pass freeze protection with a transaction.  So tell lockdep
	 * we released it.
	 */
	__sb_writers_release(ioend->io_inode->i_sb, SB_FREEZE_FS);
	/*
	 * We hand off the transaction to the completion thread now, so
	 * clear the flag here.
	 */
	current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
	return 0;
}
/*
 * Update on-disk file size now that data has been written to disk.
 */
STATIC int
__xfs_setfilesize(
	struct xfs_inode	*ip,
	struct xfs_trans	*tp,
	xfs_off_t		offset,
	size_t			size)
{
	xfs_fsize_t		isize;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	isize = xfs_new_eof(ip, offset + size);
	if (!isize) {
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		xfs_trans_cancel(tp);
		return 0;
	}

	trace_xfs_setfilesize(ip, offset, size);

	ip->i_d.di_size = isize;
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	return xfs_trans_commit(tp);
}
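
/*
 * Allocate a transaction and use it to update the on-disk inode size to
 * cover the given range of completed I/O.
 */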
int
xfs_setfilesize(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	size_t			size)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
	if (error)
		return error;

	return __xfs_setfilesize(ip, tp, offset, size);
}
STATIC int
xfs_setfilesize_ioend(
	struct xfs_ioend	*ioend,
	int			error)
{
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	struct xfs_trans	*tp = ioend->io_append_trans;

	/*
	 * The transaction may have been allocated in the I/O submission thread,
	 * thus we need to mark ourselves as being in a transaction manually.
	 * Similarly for freeze protection.
	 */
	current_set_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
	__sb_writers_acquired(VFS_I(ip)->i_sb, SB_FREEZE_FS);

	/* we abort the update if there was an IO error */
	if (error) {
		xfs_trans_cancel(tp);
		return error;
	}

	return __xfs_setfilesize(ip, tp, ioend->io_offset, ioend->io_size);
}
/*
 * IO write completion.
 */
STATIC void
xfs_end_io(
	struct work_struct	*work)
{
	struct xfs_ioend	*ioend =
		container_of(work, struct xfs_ioend, io_work);
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	xfs_off_t		offset = ioend->io_offset;
	size_t			size = ioend->io_size;
	int			error;

	/*
	 * Just clean up the in-memory structures if the fs has been shut down.
	 */
	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		error = -EIO;
		goto done;
	}

	/*
	 * Clean up any COW blocks on an I/O error.
	 */
	error = blk_status_to_errno(ioend->io_bio->bi_status);
	if (unlikely(error)) {
		switch (ioend->io_type) {
		case XFS_IO_COW:
			xfs_reflink_cancel_cow_range(ip, offset, size, true);
			break;
		}

		goto done;
	}

	/*
	 * Success: commit the COW or unwritten blocks if needed.
	 */
	switch (ioend->io_type) {
	case XFS_IO_COW:
		error = xfs_reflink_end_cow(ip, offset, size);
		break;
	case XFS_IO_UNWRITTEN:
		/* writeback should never update isize */
		error = xfs_iomap_write_unwritten(ip, offset, size, false);
		break;
	default:
		ASSERT(!xfs_ioend_is_append(ioend) || ioend->io_append_trans);
		break;
	}

done:
	if (ioend->io_append_trans)
		error = xfs_setfilesize_ioend(ioend, error);
	xfs_destroy_ioend(ioend, error);
}
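
/*
 * Bio completion handler.  Defer ioend processing to a workqueue when
 * further transaction work (unwritten conversion, COW remapping, on-disk
 * size updates) is required; otherwise the ioend can be destroyed directly
 * from bio completion context.
 */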
STATIC void
xfs_end_bio(
	struct bio		*bio)
{
	struct xfs_ioend	*ioend = bio->bi_private;
	struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;

	if (ioend->io_type == XFS_IO_UNWRITTEN || ioend->io_type == XFS_IO_COW)
		queue_work(mp->m_unwritten_workqueue, &ioend->io_work);
	else if (ioend->io_append_trans)
		queue_work(mp->m_data_workqueue, &ioend->io_work);
	else
		xfs_destroy_ioend(ioend, blk_status_to_errno(bio->bi_status));
}
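
/*
 * Look up the extent mapping covering @offset for writeback, allocating
 * real blocks for delalloc regions where needed.
 */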
STATIC int
xfs_map_blocks(
	struct inode		*inode,
	loff_t			offset,
	struct xfs_bmbt_irec	*imap,
	int			type)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			count = i_blocksize(inode);
	xfs_fileoff_t		offset_fsb, end_fsb;
	int			error = 0;
	int			bmapi_flags = XFS_BMAPI_ENTIRE;
	int			nimaps = 1;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	ASSERT(type != XFS_IO_COW);
	if (type == XFS_IO_UNWRITTEN)
		bmapi_flags |= XFS_BMAPI_IGSTATE;

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
	       (ip->i_df.if_flags & XFS_IFEXTENTS));
	ASSERT(offset <= mp->m_super->s_maxbytes);

	if (offset + count > mp->m_super->s_maxbytes)
		count = mp->m_super->s_maxbytes - offset;
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
				imap, &nimaps, bmapi_flags);
	/*
	 * Truncate an overwrite extent if there's a pending CoW
	 * reservation before the end of this extent.  This forces us
	 * to come back to writepage to take care of the CoW.
	 */
	if (nimaps && type == XFS_IO_OVERWRITE)
		xfs_reflink_trim_irec_to_next_cow(ip, offset_fsb, imap);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (error)
		return error;

	if (type == XFS_IO_DELALLOC &&
	    (!nimaps || isnullstartblock(imap->br_startblock))) {
		error = xfs_iomap_write_allocate(ip, XFS_DATA_FORK, offset,
				imap);
		if (!error)
			trace_xfs_map_blocks_alloc(ip, offset, count, type, imap);
		return error;
	}

#ifdef DEBUG
	if (type == XFS_IO_UNWRITTEN) {
		ASSERT(nimaps);
		ASSERT(imap->br_startblock != HOLESTARTBLOCK);
		ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
	}
#endif
	if (nimaps)
		trace_xfs_map_blocks_found(ip, offset, count, type, imap);
	return 0;
}
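
/*
 * Check whether the given file offset is still covered by the cached
 * extent mapping.
 */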
STATIC bool
xfs_imap_valid(
	struct inode		*inode,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	offset >>= inode->i_blkbits;

	/*
	 * We have to make sure the cached mapping is within EOF to protect
	 * against eofblocks trimming on file release leaving us with a stale
	 * mapping. Otherwise, a page for a subsequent file extending buffered
	 * write could get picked up by this writeback cycle and written to the
	 * wrong blocks.
	 *
	 * Note that what we really want here is a generic mapping invalidation
	 * mechanism to protect us from arbitrary extent modifying contexts, not
	 * just eofblocks.
	 */
	xfs_trim_extent_eof(imap, XFS_I(inode));

	return offset >= imap->br_startoff &&
		offset < imap->br_startoff + imap->br_blockcount;
}
STATIC void
xfs_start_buffer_writeback(
	struct buffer_head	*bh)
{
	ASSERT(buffer_mapped(bh));
	ASSERT(buffer_locked(bh));
	ASSERT(!buffer_delay(bh));
	ASSERT(!buffer_unwritten(bh));

	bh->b_end_io = NULL;
	set_buffer_async_write(bh);
	set_buffer_uptodate(bh);
	clear_buffer_dirty(bh);
}
STATIC void
xfs_start_page_writeback(
	struct page		*page,
	int			clear_dirty)
{
	ASSERT(PageLocked(page));
	ASSERT(!PageWriteback(page));

	/*
	 * if the page was not fully cleaned, we need to ensure that the higher
	 * layers come back to it correctly. That means we need to keep the page
	 * dirty, and for WB_SYNC_ALL writeback we need to ensure the
	 * PAGECACHE_TAG_TOWRITE index mark is not removed so another attempt to
	 * write this page in this writeback sweep will be made.
	 */
	if (clear_dirty) {
		clear_page_dirty_for_io(page);
		set_page_writeback(page);
	} else
		set_page_writeback_keepwrite(page);

	unlock_page(page);
}
static inline int xfs_bio_add_buffer(struct bio *bio, struct buffer_head *bh)
{
	return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
}
/*
 * Submit the bio for an ioend. We are passed an ioend with a bio attached to
 * it, and we submit that bio. The ioend may be used for multiple bio
 * submissions, so we only want to allocate an append transaction for the ioend
 * once. In the case of multiple bio submission, each bio will take an IO
 * reference to the ioend to ensure that the ioend completion is only done once
 * all bios have been submitted and the ioend is really done.
 *
 * If @fail is non-zero, it means that we have a situation where some part of
 * the submission process has failed after we have marked pages for writeback
 * and unlocked them. In this situation, we need to fail the bio and ioend
 * rather than submit it to IO. This typically only happens on a filesystem
 * shutdown.
 */
STATIC int
xfs_submit_ioend(
	struct writeback_control *wbc,
	struct xfs_ioend	*ioend,
	int			status)
{
	/* Convert CoW extents to regular */
	if (!status && ioend->io_type == XFS_IO_COW) {
		status = xfs_reflink_convert_cow(XFS_I(ioend->io_inode),
				ioend->io_offset, ioend->io_size);
	}

	/* Reserve log space if we might write beyond the on-disk inode size. */
	if (!status &&
	    ioend->io_type != XFS_IO_UNWRITTEN &&
	    xfs_ioend_is_append(ioend) &&
	    !ioend->io_append_trans)
		status = xfs_setfilesize_trans_alloc(ioend);

	ioend->io_bio->bi_private = ioend;
	ioend->io_bio->bi_end_io = xfs_end_bio;
	ioend->io_bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);

	/*
	 * If we are failing the IO now, just mark the ioend with an
	 * error and finish it. This will run IO completion immediately
	 * as there is only one reference to the ioend at this point in
	 * time.
	 */
	if (status) {
		ioend->io_bio->bi_status = errno_to_blk_status(status);
		bio_endio(ioend->io_bio);
		return status;
	}

	ioend->io_bio->bi_write_hint = ioend->io_inode->i_write_hint;
	submit_bio(ioend->io_bio);
	return 0;
}
static void
xfs_init_bio_from_bh(
	struct bio		*bio,
	struct buffer_head	*bh)
{
	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;
}
static struct xfs_ioend *
xfs_alloc_ioend(
	struct inode		*inode,
	unsigned int		type,
	xfs_off_t		offset,
	struct buffer_head	*bh)
{
	struct xfs_ioend	*ioend;
	struct bio		*bio;

	bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, xfs_ioend_bioset);
	xfs_init_bio_from_bh(bio, bh);

	ioend = container_of(bio, struct xfs_ioend, io_inline_bio);
	INIT_LIST_HEAD(&ioend->io_list);
	ioend->io_type = type;
	ioend->io_inode = inode;
	ioend->io_size = 0;
	ioend->io_offset = offset;
	INIT_WORK(&ioend->io_work, xfs_end_io);
	ioend->io_append_trans = NULL;
	ioend->io_bio = bio;
	return ioend;
}
/*
 * Allocate a new bio, and chain the old bio to the new one.
 *
 * Note that we have to perform the chaining in this unintuitive order
 * so that the bi_private linkage is set up in the right direction for the
 * traversal in xfs_destroy_ioend().
 */
static void
xfs_chain_bio(
	struct xfs_ioend	*ioend,
	struct writeback_control *wbc,
	struct buffer_head	*bh)
{
	struct bio *new;

	new = bio_alloc(GFP_NOFS, BIO_MAX_PAGES);
	xfs_init_bio_from_bh(new, bh);

	bio_chain(ioend->io_bio, new);
	bio_get(ioend->io_bio);		/* for xfs_destroy_ioend */
	ioend->io_bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);
	ioend->io_bio->bi_write_hint = ioend->io_inode->i_write_hint;
	submit_bio(ioend->io_bio);
	ioend->io_bio = new;
}
/*
 * Test to see if we've been building up a completion structure for
 * earlier buffers -- if so, we try to append to this ioend if we
 * can, otherwise we finish off any current ioend and start another.
 * Return the ioend we finished off so that the caller can submit it
 * once it has finished processing the dirty page.
 */
STATIC void
xfs_add_to_ioend(
	struct inode		*inode,
	struct buffer_head	*bh,
	xfs_off_t		offset,
	struct xfs_writepage_ctx *wpc,
	struct writeback_control *wbc,
	struct list_head	*iolist)
{
	if (!wpc->ioend || wpc->io_type != wpc->ioend->io_type ||
	    bh->b_blocknr != wpc->last_block + 1 ||
	    offset != wpc->ioend->io_offset + wpc->ioend->io_size) {
		if (wpc->ioend)
			list_add(&wpc->ioend->io_list, iolist);
		wpc->ioend = xfs_alloc_ioend(inode, wpc->io_type, offset, bh);
	}

	/*
	 * If the buffer doesn't fit into the bio we need to allocate a new
	 * one.  This shouldn't happen more than once for a given buffer.
	 */
	while (xfs_bio_add_buffer(wpc->ioend->io_bio, bh) != bh->b_size)
		xfs_chain_bio(wpc->ioend, wbc, bh);

	wpc->ioend->io_size += bh->b_size;
	wpc->last_block = bh->b_blocknr;
	xfs_start_buffer_writeback(bh);
}
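
/*
 * Translate the file offset into a disk block number for the buffer using
 * the extent mapping, and mark the buffer mapped.
 */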
STATIC void
xfs_map_buffer(
	struct inode		*inode,
	struct buffer_head	*bh,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	sector_t		bn;
	struct xfs_mount	*m = XFS_I(inode)->i_mount;
	xfs_off_t		iomap_offset = XFS_FSB_TO_B(m, imap->br_startoff);
	xfs_daddr_t		iomap_bn = xfs_fsb_to_db(XFS_I(inode), imap->br_startblock);

	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

	bn = (iomap_bn >> (inode->i_blkbits - BBSHIFT)) +
	      ((offset - iomap_offset) >> inode->i_blkbits);

	ASSERT(bn || XFS_IS_REALTIME_INODE(XFS_I(inode)));

	bh->b_blocknr = bn;
	set_buffer_mapped(bh);
}
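
/*
 * Stamp the buffer with its disk mapping and clear the delalloc and
 * unwritten flags now that real blocks back it.
 */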
STATIC void
xfs_map_at_offset(
	struct inode		*inode,
	struct buffer_head	*bh,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

	xfs_map_buffer(inode, bh, imap, offset);
	set_buffer_mapped(bh);
	clear_buffer_delay(bh);
	clear_buffer_unwritten(bh);
}
/*
 * Test if a given page contains at least one buffer of a given @type.
 * If @check_all_buffers is true, then we walk all the buffers in the page to
 * try to find one of the type passed in. If it is not set, then the caller only
 * needs to check the first buffer on the page for a match.
 */
STATIC bool
xfs_check_page_type(
	struct page		*page,
	unsigned int		type,
	bool			check_all_buffers)
{
	struct buffer_head	*bh;
	struct buffer_head	*head;

	if (PageWriteback(page))
		return false;
	if (!page->private)
		return false;
	if (!page_has_buffers(page))
		return false;

	bh = head = page_buffers(page);
	do {
		if (buffer_unwritten(bh)) {
			if (type == XFS_IO_UNWRITTEN)
				return true;
		} else if (buffer_delay(bh)) {
			if (type == XFS_IO_DELALLOC)
				return true;
		} else if (buffer_dirty(bh) && buffer_mapped(bh)) {
			if (type == XFS_IO_OVERWRITE)
				return true;
		}

		/* If we are only checking the first buffer, we are done now. */
		if (!check_all_buffers)
			break;
	} while ((bh = bh->b_this_page) != head);

	return false;
}
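
/*
 * ->invalidatepage handler: throw away buffer state for the part of the
 * page that is being invalidated.
 */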
STATIC void
xfs_vm_invalidatepage(
	struct page		*page,
	unsigned int		offset,
	unsigned int		length)
{
	trace_xfs_invalidatepage(page->mapping->host, page, offset,
				 length);

	/*
	 * If we are invalidating the entire page, clear the dirty state from it
	 * so that we can check for attempts to release dirty cached pages in
	 * xfs_vm_releasepage().
	 */
	if (offset == 0 && length >= PAGE_SIZE)
		cancel_dirty_page(page);
	block_invalidatepage(page, offset, length);
}
/*
 * If the page has delalloc buffers on it, we need to punch them out before we
 * invalidate the page. If we don't, we leave a stale delalloc mapping on the
 * inode that can trip a BUG() in xfs_get_blocks() later on if a direct IO read
 * is done on that same region - the delalloc extent is returned when none is
 * supposed to be there.
 *
 * We prevent this by truncating away the delalloc regions on the page before
 * invalidating it. Because they are delalloc, we can do this without needing a
 * transaction. Indeed - if we get ENOSPC errors, we have to be able to do this
 * truncation without a transaction as there is no space left for block
 * reservation (typically why we see an ENOSPC in writeback).
 *
 * This is not a performance critical path, so for now just do the punching a
 * buffer head at a time.
 */
STATIC void
xfs_aops_discard_page(
	struct page		*page)
{
	struct inode		*inode = page->mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct buffer_head	*bh, *head;
	loff_t			offset = page_offset(page);

	if (!xfs_check_page_type(page, XFS_IO_DELALLOC, true))
		goto out_invalidate;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		goto out_invalidate;

	xfs_alert(ip->i_mount,
		"page discard on page %p, inode 0x%llx, offset %llu.",
			page, ip->i_ino, offset);

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	bh = head = page_buffers(page);
	do {
		int		error;
		xfs_fileoff_t	start_fsb;

		if (!buffer_delay(bh))
			goto next_buffer;

		start_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
		error = xfs_bmap_punch_delalloc_range(ip, start_fsb, 1);
		if (error) {
			/* something screwed, just bail */
			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
				xfs_alert(ip->i_mount,
			"page discard unable to remove delalloc mapping.");
			}
			break;
		}
next_buffer:
		offset += i_blocksize(inode);

	} while ((bh = bh->b_this_page) != head);

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
out_invalidate:
	xfs_vm_invalidatepage(page, 0, PAGE_SIZE);
	return;
}
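
/*
 * Check whether writeback at this offset has to go through the COW fork,
 * and if so make sure the COW mapping has real blocks allocated behind it.
 */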
static int
xfs_map_cow(
	struct xfs_writepage_ctx *wpc,
	struct inode		*inode,
	loff_t			offset,
	unsigned int		*new_type)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_bmbt_irec	imap;
	bool			is_cow = false;
	int			error;

	/*
	 * If we already have a valid COW mapping keep using it.
	 */
	if (wpc->io_type == XFS_IO_COW) {
		wpc->imap_valid = xfs_imap_valid(inode, &wpc->imap, offset);
		if (wpc->imap_valid) {
			*new_type = XFS_IO_COW;
			return 0;
		}
	}

	/*
	 * Else we need to check if there is a COW mapping at this offset.
	 */
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	is_cow = xfs_reflink_find_cow_mapping(ip, offset, &imap);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (!is_cow)
		return 0;

	/*
	 * And if the COW mapping has a delayed extent here we need to
	 * allocate real space for it now.
	 */
	if (isnullstartblock(imap.br_startblock)) {
		error = xfs_iomap_write_allocate(ip, XFS_COW_FORK, offset,
				&imap);
		if (error)
			return error;
	}

	wpc->io_type = *new_type = XFS_IO_COW;
	wpc->imap_valid = true;
	wpc->imap = imap;
	return 0;
}
/*
 * We implement an immediate ioend submission policy here to avoid needing to
 * chain multiple ioends and hence nest mempool allocations which can violate
 * forward progress guarantees we need to provide. The current ioend we are
 * adding buffers to is cached on the writepage context, and if the new buffer
 * does not append to the cached ioend it will create a new ioend and cache that
 * instead.
 *
 * If a new ioend is created and cached, the old ioend is returned and queued
 * locally for submission once the entire page is processed or an error has been
 * detected.  While ioends are submitted immediately after they are completed,
 * batching optimisations are provided by higher level block plugging.
 *
 * At the end of a writeback pass, there will be a cached ioend remaining on the
 * writepage context that the caller will need to submit.
 */
static int
xfs_writepage_map(
	struct xfs_writepage_ctx *wpc,
	struct writeback_control *wbc,
	struct inode		*inode,
	struct page		*page,
	loff_t			offset,
	uint64_t		end_offset)
{
	LIST_HEAD(submit_list);
	struct xfs_ioend	*ioend, *next;
	struct buffer_head	*bh, *head;
	ssize_t			len = i_blocksize(inode);
	int			error = 0;
	int			count = 0;
	int			uptodate = 1;
	unsigned int		new_type;

	bh = head = page_buffers(page);
	offset = page_offset(page);
	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;

		/*
		 * set_page_dirty dirties all buffers in a page, independent
		 * of their state.  The dirty state however is entirely
		 * meaningless for holes (!mapped && uptodate), so skip
		 * buffers covering holes here.
		 */
		if (!buffer_mapped(bh) && buffer_uptodate(bh)) {
			wpc->imap_valid = false;
			continue;
		}

		if (buffer_unwritten(bh))
			new_type = XFS_IO_UNWRITTEN;
		else if (buffer_delay(bh))
			new_type = XFS_IO_DELALLOC;
		else if (buffer_uptodate(bh))
			new_type = XFS_IO_OVERWRITE;
		else {
			if (PageUptodate(page))
				ASSERT(buffer_mapped(bh));
			/*
			 * This buffer is not uptodate and will not be
			 * written to disk.  Ensure that we will put any
			 * subsequent writeable buffers into a new
			 * ioend.
			 */
			wpc->imap_valid = false;
			continue;
		}

		if (xfs_is_reflink_inode(XFS_I(inode))) {
			error = xfs_map_cow(wpc, inode, offset, &new_type);
			if (error)
				goto out;
		}

		if (wpc->io_type != new_type) {
			wpc->io_type = new_type;
			wpc->imap_valid = false;
		}

		if (wpc->imap_valid)
			wpc->imap_valid = xfs_imap_valid(inode, &wpc->imap,
							 offset);
		if (!wpc->imap_valid) {
			error = xfs_map_blocks(inode, offset, &wpc->imap,
					     wpc->io_type);
			if (error)
				goto out;
			wpc->imap_valid = xfs_imap_valid(inode, &wpc->imap,
							 offset);
		}
		if (wpc->imap_valid) {
			lock_buffer(bh);
			if (wpc->io_type != XFS_IO_OVERWRITE)
				xfs_map_at_offset(inode, bh, &wpc->imap, offset);
			xfs_add_to_ioend(inode, bh, offset, wpc, wbc, &submit_list);
			count++;
		}

	} while (offset += len, ((bh = bh->b_this_page) != head));

	if (uptodate && bh == head)
		SetPageUptodate(page);

	ASSERT(wpc->ioend || list_empty(&submit_list));

out:
	/*
	 * On error, we have to fail the ioend here because we have locked
	 * buffers in the ioend. If we don't do this, we'll deadlock
	 * invalidating the page as that tries to lock the buffers on the page.
	 * Also, because we may have set pages under writeback, we have to make
	 * sure we run IO completion to mark the error state of the IO
	 * appropriately, so we can't cancel the ioend directly here. That means
	 * we have to mark this page as under writeback if we included any
	 * buffers from it in the ioend chain so that completion treats it
	 * correctly.
	 *
	 * If we didn't include the page in the ioend, then on error we can
	 * simply discard and unlock it as there are no other users of the page
	 * or its buffers right now. The caller will still need to trigger
	 * submission of outstanding ioends on the writepage context so they are
	 * treated correctly on error.
	 */
	if (count) {
		xfs_start_page_writeback(page, !error);

		/*
		 * Preserve the original error if there was one, otherwise catch
		 * submission errors here and propagate into subsequent ioend
		 * submissions.
		 */
		list_for_each_entry_safe(ioend, next, &submit_list, io_list) {
			int error2;

			list_del_init(&ioend->io_list);
			error2 = xfs_submit_ioend(wbc, ioend, error);
			if (error2 && !error)
				error = error2;
		}
	} else if (error) {
		xfs_aops_discard_page(page);
		ClearPageUptodate(page);
		unlock_page(page);
	} else {
		/*
		 * We can end up here with no error and nothing to write if we
		 * race with a partial page truncate on a sub-page block sized
		 * filesystem. In that case we need to mark the page clean.
		 */
		xfs_start_page_writeback(page, 1);
		end_page_writeback(page);
	}

	mapping_set_error(page->mapping, error);
	return error;
}
/*
 * Write out a dirty page.
 *
 * For delalloc space on the page we need to allocate space and flush it.
 * For unwritten space on the page we need to start the conversion to
 * regular allocated space.
 * For any other dirty buffer heads on the page we should flush them.
 */
STATIC int
xfs_do_writepage(
	struct page		*page,
	struct writeback_control *wbc,
	void			*data)
{
	struct xfs_writepage_ctx *wpc = data;
	struct inode		*inode = page->mapping->host;
	loff_t			offset;
	uint64_t		end_offset;
	pgoff_t			end_index;

	trace_xfs_writepage(inode, page, 0, 0);

	ASSERT(page_has_buffers(page));

	/*
	 * Refuse to write the page out if we are called from reclaim context.
	 *
	 * This avoids stack overflows when called from deeply used stacks in
	 * random callers for direct reclaim or memcg reclaim.  We explicitly
	 * allow reclaim from kswapd as the stack usage there is relatively low.
	 *
	 * This should never happen except in the case of a VM regression so
	 * warn about it.
	 */
	if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
			PF_MEMALLOC))
		goto redirty;

	/*
	 * Given that we do not allow direct reclaim to call us, we should
	 * never be called while in a filesystem transaction.
	 */
	if (WARN_ON_ONCE(current->flags & PF_MEMALLOC_NOFS))
		goto redirty;

	/*
	 * Is this page beyond the end of the file?
	 *
	 * The page index is less than the end_index, adjust the end_offset
	 * to the highest offset that this page should represent.
	 * -----------------------------------------------------
	 * |			file mapping	       | <EOF> |
	 * -----------------------------------------------------
	 * | Page ... | Page N-2 | Page N-1 |  Page N  |       |
	 * ^--------------------------------^----------|--------
	 * |     desired writeback range    |      see else    |
	 * ---------------------------------^------------------|
	 */
	offset = i_size_read(inode);
	end_index = offset >> PAGE_SHIFT;
	if (page->index < end_index)
		end_offset = (xfs_off_t)(page->index + 1) << PAGE_SHIFT;
	else {
		/*
		 * Check whether the page to write out is beyond or straddles
		 * i_size or not.
		 * -------------------------------------------------------
		 * |		file mapping		        | <EOF>  |
		 * -------------------------------------------------------
		 * | Page ... | Page N-2 | Page N-1 |  Page N   | Beyond |
		 * ^--------------------------------^-----------|---------
		 * |				    |      Straddles     |
		 * ---------------------------------^-----------|--------|
		 */
		unsigned offset_into_page = offset & (PAGE_SIZE - 1);

		/*
		 * Skip the page if it is fully outside i_size, e.g. due to a
		 * truncate operation that is in progress. We must redirty the
		 * page so that reclaim stops reclaiming it. Otherwise
		 * xfs_vm_releasepage() is called on it and gets confused.
		 *
		 * Note that the end_index is unsigned long, it would overflow
		 * if the given offset is greater than 16TB on 32-bit system
		 * and if we do check the page is fully outside i_size or not
		 * via "if (page->index >= end_index + 1)" as "end_index + 1"
		 * will be evaluated to 0.  Hence this page will be redirtied
		 * and written out repeatedly, which would result in an
		 * infinite loop; the user program that performs this operation
		 * will hang.  Instead, we can verify this situation by checking
		 * if the page to write is totally beyond the i_size or if its
		 * offset is just equal to the EOF.
		 */
		if (page->index > end_index ||
		    (page->index == end_index && offset_into_page == 0))
			goto redirty;

		/*
		 * The page straddles i_size.  It must be zeroed out on each
		 * and every writepage invocation because it may be mmapped.
		 * "A file is mapped in multiples of the page size.  For a file
		 * that is not a multiple of the page size, the remaining
		 * memory is zeroed when mapped, and writes to that region are
		 * not written out to the file."
		 */
		zero_user_segment(page, offset_into_page, PAGE_SIZE);

		/* Adjust the end_offset to the end of file */
		end_offset = offset;
	}

	return xfs_writepage_map(wpc, wbc, inode, page, offset, end_offset);

redirty:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
}
STATIC int
xfs_vm_writepage(
	struct page		*page,
	struct writeback_control *wbc)
{
	struct xfs_writepage_ctx wpc = {
		.io_type = XFS_IO_INVALID,
	};
	int			ret;

	ret = xfs_do_writepage(page, wbc, &wpc);
	if (wpc.ioend)
		ret = xfs_submit_ioend(wbc, wpc.ioend, ret);
	return ret;
}
STATIC int
xfs_vm_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	struct xfs_writepage_ctx wpc = {
		.io_type = XFS_IO_INVALID,
	};
	int			ret;

	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
	if (dax_mapping(mapping))
		return dax_writeback_mapping_range(mapping,
				xfs_find_bdev_for_inode(mapping->host), wbc);

	ret = write_cache_pages(mapping, wbc, xfs_do_writepage, &wpc);
	if (wpc.ioend)
		ret = xfs_submit_ioend(wbc, wpc.ioend, ret);
	return ret;
}
/*
 * Called to move a page into cleanable state - and from there
 * to be released. The page should already be clean. We always
 * have buffer heads in this call.
 *
 * Returns 1 if the page is ok to release, 0 otherwise.
 */
STATIC int
xfs_vm_releasepage(
	struct page		*page,
	gfp_t			gfp_mask)
{
	int			delalloc, unwritten;

	trace_xfs_releasepage(page->mapping->host, page, 0, 0);

	/*
	 * mm accommodates an old ext3 case where clean pages might not have had
	 * the dirty bit cleared. Thus, it can send actual dirty pages to
	 * ->releasepage() via shrink_active_list(). Conversely,
	 * block_invalidatepage() can send pages that are still marked dirty but
	 * otherwise have invalidated buffers.
	 *
	 * We want to release the latter to avoid unnecessary buildup of the
	 * LRU, so xfs_vm_invalidatepage() clears the page dirty flag on pages
	 * that are entirely invalidated and need to be released. Hence the
	 * only time we should get dirty pages here is through
	 * shrink_active_list() and so we can simply skip those now.
	 *
	 * warn if we've left any lingering delalloc/unwritten buffers on clean
	 * or invalidated pages we are about to release.
	 */
	if (PageDirty(page))
		return 0;

	xfs_count_page_state(page, &delalloc, &unwritten);

	if (WARN_ON_ONCE(delalloc))
		return 0;
	if (WARN_ON_ONCE(unwritten))
		return 0;

	return try_to_free_buffers(page);
}
/*
 * If this is O_DIRECT or the mpage code calling, tell them how large the
 * mapping is, so that we can avoid repeated get_blocks calls.
 *
 * If the mapping spans EOF, then we have to break the mapping up as the mapping
 * for blocks beyond EOF must be marked new so that sub block regions can be
 * correctly zeroed. We can't do this for mappings within EOF unless the mapping
 * was just allocated or is unwritten, otherwise the callers would overwrite
 * existing data with zeros. Hence we have to split the mapping into a range up
 * to and including EOF, and a second mapping for beyond EOF.
 */
static void
xfs_map_trim_size(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset,
	ssize_t			size)
{
	xfs_off_t		mapping_size;

	mapping_size = imap->br_startoff + imap->br_blockcount - iblock;
	mapping_size <<= inode->i_blkbits;

	ASSERT(mapping_size > 0);
	if (mapping_size > size)
		mapping_size = size;
	if (offset < i_size_read(inode) &&
	    offset + mapping_size >= i_size_read(inode)) {
		/* limit mapping to block that spans EOF */
		mapping_size = roundup_64(i_size_read(inode) - offset,
					  i_blocksize(inode));
	}
	if (mapping_size > LONG_MAX)
		mapping_size = LONG_MAX;

	bh_result->b_size = mapping_size;
}
static int
xfs_get_blocks(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb, end_fsb;
	int			error = 0;
	int			lockmode = 0;
	struct xfs_bmbt_irec	imap;
	int			nimaps = 1;
	xfs_off_t		offset;
	ssize_t			size;

	BUG_ON(create);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	offset = (xfs_off_t)iblock << inode->i_blkbits;
	ASSERT(bh_result->b_size >= i_blocksize(inode));
	size = bh_result->b_size;

	if (offset >= i_size_read(inode))
		return 0;

	/*
	 * Direct I/O is usually done on preallocated files, so try getting
	 * a block mapping without an exclusive lock first.
	 */
	lockmode = xfs_ilock_data_map_shared(ip);

	ASSERT(offset <= mp->m_super->s_maxbytes);
	if (offset + size > mp->m_super->s_maxbytes)
		size = mp->m_super->s_maxbytes - offset;
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);

	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
			&imap, &nimaps, XFS_BMAPI_ENTIRE);
	if (error)
		goto out_unlock;

	if (nimaps) {
		trace_xfs_get_blocks_found(ip, offset, size,
			imap.br_state == XFS_EXT_UNWRITTEN ?
				XFS_IO_UNWRITTEN : XFS_IO_OVERWRITE, &imap);
		xfs_iunlock(ip, lockmode);
	} else {
		trace_xfs_get_blocks_notfound(ip, offset, size);
		goto out_unlock;
	}

	/* trim mapping down to size requested */
	xfs_map_trim_size(inode, iblock, bh_result, &imap, offset, size);

	/*
	 * For unwritten extents do not report a disk address in the buffered
	 * read case (treat as if we're reading into a hole).
	 */
	if (xfs_bmap_is_real_extent(&imap))
		xfs_map_buffer(inode, bh_result, &imap, offset);

	/*
	 * If this is a realtime file, data may be on a different device
	 * to that pointed to from the buffer_head b_bdev currently.
	 */
	bh_result->b_bdev = xfs_find_bdev_for_inode(inode);
	return 0;

out_unlock:
	xfs_iunlock(ip, lockmode);
	return error;
}
STATIC ssize_t
xfs_vm_direct_IO(
	struct kiocb		*iocb,
	struct iov_iter		*iter)
{
	/*
	 * We just need the method present so that open/fcntl allow direct I/O.
	 */
	return -EINVAL;
}
STATIC sector_t
xfs_vm_bmap(
	struct address_space	*mapping,
	sector_t		block)
{
	struct inode		*inode = (struct inode *)mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);

	trace_xfs_vm_bmap(XFS_I(inode));

	/*
	 * The swap code (ab-)uses ->bmap to get a block mapping and then
	 * bypasses the file system for actual I/O.  We really can't allow
	 * that on reflink inodes, so we have to skip out here.  And yes,
	 * 0 is the magic code for a bmap error.
	 *
	 * Since we don't pass back blockdev info, we can't return bmap
	 * information for rt files either.
	 */
	if (xfs_is_reflink_inode(ip) || XFS_IS_REALTIME_INODE(ip))
		return 0;

	filemap_write_and_wait(mapping);
	return generic_block_bmap(mapping, block, xfs_get_blocks);
}
STATIC int
xfs_vm_readpage(
	struct file		*unused,
	struct page		*page)
{
	trace_xfs_vm_readpage(page->mapping->host, 1);
	return mpage_readpage(page, xfs_get_blocks);
}
STATIC int
xfs_vm_readpages(
	struct file		*unused,
	struct address_space	*mapping,
	struct list_head	*pages,
	unsigned		nr_pages)
{
	trace_xfs_vm_readpages(mapping->host, nr_pages);
	return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
}
/*
 * This is basically a copy of __set_page_dirty_buffers() with one
 * small tweak: buffers beyond EOF do not get marked dirty. If we mark them
 * dirty, we'll never be able to clean them because we don't write buffers
 * beyond EOF, and that means we can't invalidate pages that span EOF
 * that have been marked dirty. Further, the dirty state can leak into
 * the file interior if the file is extended, resulting in all sorts of
 * bad things happening as the state does not match the underlying data.
 *
 * XXX: this really indicates that bufferheads in XFS need to die. Warts like
 * this only exist because of bufferheads and how the generic code manages them.
 */
STATIC int
xfs_vm_set_page_dirty(
	struct page		*page)
{
	struct address_space	*mapping = page->mapping;
	struct inode		*inode = mapping->host;
	loff_t			end_offset;
	loff_t			offset;
	int			newly_dirty;

	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	end_offset = i_size_read(inode);
	offset = page_offset(page);

	spin_lock(&mapping->private_lock);
	if (page_has_buffers(page)) {
		struct buffer_head *head = page_buffers(page);
		struct buffer_head *bh = head;

		do {
			if (offset < end_offset)
				set_buffer_dirty(bh);
			bh = bh->b_this_page;
			offset += i_blocksize(inode);
		} while (bh != head);
	}
	/*
	 * Lock out page->mem_cgroup migration to keep PageDirty
	 * synchronized with per-memcg dirty page counters.
	 */
	lock_page_memcg(page);
	newly_dirty = !TestSetPageDirty(page);
	spin_unlock(&mapping->private_lock);

	if (newly_dirty) {
		/* sigh - __set_page_dirty() is static, so copy it here, too */
		unsigned long flags;

		spin_lock_irqsave(&mapping->tree_lock, flags);
		if (page->mapping) {	/* Race with truncate? */
			WARN_ON_ONCE(!PageUptodate(page));
			account_page_dirtied(page, mapping);
			radix_tree_tag_set(&mapping->page_tree,
					page_index(page), PAGECACHE_TAG_DIRTY);
		}
		spin_unlock_irqrestore(&mapping->tree_lock, flags);
	}
	unlock_page_memcg(page);
	if (newly_dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	return newly_dirty;
}
const struct address_space_operations xfs_address_space_operations = {
	.readpage		= xfs_vm_readpage,
	.readpages		= xfs_vm_readpages,
	.writepage		= xfs_vm_writepage,
	.writepages		= xfs_vm_writepages,
	.set_page_dirty		= xfs_vm_set_page_dirty,
	.releasepage		= xfs_vm_releasepage,
	.invalidatepage		= xfs_vm_invalidatepage,
	.bmap			= xfs_vm_bmap,
	.direct_IO		= xfs_vm_direct_IO,
	.migratepage		= buffer_migrate_page,
	.is_partially_uptodate	= block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
};