/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs_trans.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
#include "xfs_error.h"
#include "xfs_iomap.h"
#include "xfs_vnodeops.h"
#include "xfs_trace.h"

#include <linux/gfp.h>
#include <linux/mpage.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
/*
 * Prime number of hash buckets since address is used as the key.
 */
#define NVSYNC		37
#define to_ioend_wq(v)	(&xfs_ioend_wq[((unsigned long)v) % NVSYNC])
static wait_queue_head_t xfs_ioend_wq[NVSYNC];
void __init
xfs_ioend_init(void)
{
	int i;

	for (i = 0; i < NVSYNC; i++)
		init_waitqueue_head(&xfs_ioend_wq[i]);
}
void
xfs_ioend_wait(
	xfs_inode_t	*ip)
{
	wait_queue_head_t *wq = to_ioend_wq(ip);

	wait_event(*wq, (atomic_read(&ip->i_iocount) == 0));
}
STATIC void
xfs_ioend_wake(
	xfs_inode_t	*ip)
{
	if (atomic_dec_and_test(&ip->i_iocount))
		wake_up(to_ioend_wq(ip));
}
STATIC void
xfs_count_page_state(
	struct page		*page,
	int			*delalloc,
	int			*unmapped,
	int			*unwritten)
{
	struct buffer_head	*bh, *head;

	*delalloc = *unmapped = *unwritten = 0;

	bh = head = page_buffers(page);
	do {
		if (buffer_uptodate(bh) && !buffer_mapped(bh))
			(*unmapped) = 1;
		else if (buffer_unwritten(bh))
			(*unwritten) = 1;
		else if (buffer_delay(bh))
			(*delalloc) = 1;
	} while ((bh = bh->b_this_page) != head);
}
STATIC struct block_device *
xfs_find_bdev_for_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;

	if (XFS_IS_REALTIME_INODE(ip))
		return mp->m_rtdev_targp->bt_bdev;

	return mp->m_ddev_targp->bt_bdev;
}
/*
 * We're now finished for good with this ioend structure.
 * Update the page state via the associated buffer_heads,
 * release holds on the inode and bio, and finally free
 * up memory.  Do not use the ioend after this.
 */
STATIC void
xfs_destroy_ioend(
	xfs_ioend_t		*ioend)
{
	struct buffer_head	*bh, *next;
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);

	for (bh = ioend->io_buffer_head; bh; bh = next) {
		next = bh->b_private;
		bh->b_end_io(bh, !ioend->io_error);
	}

	/*
	 * Volume managers supporting multiple paths can send back ENODEV
	 * when the final path disappears.  In this case continuing to fill
	 * the page cache with dirty data which cannot be written out is
	 * evil, so prevent that.
	 */
	if (unlikely(ioend->io_error == -ENODEV)) {
		xfs_do_force_shutdown(ip->i_mount, SHUTDOWN_DEVICE_REQ,
				      __FILE__, __LINE__);
	}

	xfs_ioend_wake(ip);
	mempool_free(ioend, xfs_ioend_pool);
}
/*
 * If the end of the current ioend is beyond the current EOF,
 * return the new EOF value, otherwise zero.
 */
STATIC xfs_fsize_t
xfs_ioend_new_eof(
	xfs_ioend_t		*ioend)
{
	xfs_inode_t		*ip = XFS_I(ioend->io_inode);
	xfs_fsize_t		isize;
	xfs_fsize_t		bsize;

	bsize = ioend->io_offset + ioend->io_size;
	isize = MAX(ip->i_size, ip->i_new_size);
	isize = MIN(isize, bsize);
	return isize > ip->i_d.di_size ? isize : 0;
}
/*
 * Update on-disk file size now that data has been written to disk.  The
 * current in-memory file size is i_size.  If a write is beyond eof i_new_size
 * will be the intended file size until i_size is updated.  If this write does
 * not extend all the way to the valid file size then restrict this update to
 * the end of the write.
 *
 * This function does not block as blocking on the inode lock in IO completion
 * can lead to IO completion order dependency deadlocks.  If it can't get the
 * inode ilock it will return EAGAIN. Callers must handle this.
 */
STATIC int
xfs_setfilesize(
	xfs_ioend_t		*ioend)
{
	xfs_inode_t		*ip = XFS_I(ioend->io_inode);
	xfs_fsize_t		isize;

	ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG);
	ASSERT(ioend->io_type != IOMAP_READ);

	if (unlikely(ioend->io_error))
		return 0;

	if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
		return EAGAIN;

	isize = xfs_ioend_new_eof(ioend);
	if (isize) {
		ip->i_d.di_size = isize;
		xfs_mark_inode_dirty(ip);
	}

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return 0;
}
/*
 * Schedule IO completion handling on a xfsdatad if this was
 * the final hold on this ioend. If we are asked to wait,
 * flush the workqueue.
 */
STATIC void
xfs_finish_ioend(
	xfs_ioend_t	*ioend,
	int		wait)
{
	if (atomic_dec_and_test(&ioend->io_remaining)) {
		struct workqueue_struct *wq;

		wq = (ioend->io_type == IOMAP_UNWRITTEN) ?
			xfsconvertd_workqueue : xfsdatad_workqueue;
		queue_work(wq, &ioend->io_work);
		if (wait)
			flush_workqueue(wq);
	}
}
/*
 * IO write completion.
 */
STATIC void
xfs_end_io(
	struct work_struct *work)
{
	xfs_ioend_t	*ioend = container_of(work, xfs_ioend_t, io_work);
	struct xfs_inode *ip = XFS_I(ioend->io_inode);
	int		error = 0;

	/*
	 * For unwritten extents we need to issue transactions to convert a
	 * range to normal written extents after the data I/O has finished.
	 */
	if (ioend->io_type == IOMAP_UNWRITTEN &&
	    likely(!ioend->io_error && !XFS_FORCED_SHUTDOWN(ip->i_mount))) {

		error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
						 ioend->io_size);
		if (error)
			ioend->io_error = error;
	}

	/*
	 * We might have to update the on-disk file size after extending
	 * writes.
	 */
	if (ioend->io_type != IOMAP_READ) {
		error = xfs_setfilesize(ioend);
		ASSERT(!error || error == EAGAIN);
	}

	/*
	 * If we didn't complete processing of the ioend, requeue it to the
	 * tail of the workqueue for another attempt later. Otherwise destroy
	 * it.
	 */
	if (error == EAGAIN) {
		atomic_inc(&ioend->io_remaining);
		xfs_finish_ioend(ioend, 0);
		/* ensure we don't spin on blocked ioends */
		delay(1);
	} else
		xfs_destroy_ioend(ioend);
}
/*
 * Allocate and initialise an IO completion structure.
 * We need to track unwritten extent write completion here initially.
 * We'll need to extend this for updating the ondisk inode size later
 * (vs. incore size).
 */
STATIC xfs_ioend_t *
xfs_alloc_ioend(
	struct inode		*inode,
	unsigned int		type)
{
	xfs_ioend_t		*ioend;

	ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS);

	/*
	 * Set the count to 1 initially, which will prevent an I/O
	 * completion callback from happening before we have started
	 * all the I/O from calling the completion routine too early.
	 */
	atomic_set(&ioend->io_remaining, 1);
	ioend->io_error = 0;
	ioend->io_list = NULL;
	ioend->io_type = type;
	ioend->io_inode = inode;
	ioend->io_buffer_head = NULL;
	ioend->io_buffer_tail = NULL;
	atomic_inc(&XFS_I(ioend->io_inode)->i_iocount);
	ioend->io_offset = 0;
	ioend->io_size = 0;

	INIT_WORK(&ioend->io_work, xfs_end_io);
	return ioend;
}
STATIC int
xfs_map_blocks(
	struct inode		*inode,
	loff_t			offset,
	ssize_t			count,
	xfs_iomap_t		*mapp,
	int			flags)
{
	int			nmaps = 1;

	return -xfs_iomap(XFS_I(inode), offset, count, flags, mapp, &nmaps);
}
STATIC int
xfs_iomap_valid(
	struct inode		*inode,
	xfs_iomap_t		*iomapp,
	loff_t			offset)
{
	struct xfs_mount	*mp = XFS_I(inode)->i_mount;
	xfs_off_t		iomap_offset = XFS_FSB_TO_B(mp, iomapp->iomap_offset);
	xfs_off_t		iomap_bsize = XFS_FSB_TO_B(mp, iomapp->iomap_bsize);

	return offset >= iomap_offset &&
		offset < iomap_offset + iomap_bsize;
}
/*
 * BIO completion handler for buffered IO.
 */
STATIC void
xfs_end_bio(
	struct bio		*bio,
	int			error)
{
	xfs_ioend_t		*ioend = bio->bi_private;

	ASSERT(atomic_read(&bio->bi_cnt) >= 1);
	ioend->io_error = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : error;

	/* Toss bio and pass work off to an xfsdatad thread */
	bio->bi_private = NULL;
	bio->bi_end_io = NULL;
	bio_put(bio);

	xfs_finish_ioend(ioend, 0);
}
STATIC void
xfs_submit_ioend_bio(
	struct writeback_control *wbc,
	xfs_ioend_t		*ioend,
	struct bio		*bio)
{
	atomic_inc(&ioend->io_remaining);
	bio->bi_private = ioend;
	bio->bi_end_io = xfs_end_bio;

	/*
	 * If the I/O is beyond EOF we mark the inode dirty immediately
	 * but don't update the inode size until I/O completion.
	 */
	if (xfs_ioend_new_eof(ioend))
		xfs_mark_inode_dirty(XFS_I(ioend->io_inode));

	submit_bio(wbc->sync_mode == WB_SYNC_ALL ?
		   WRITE_SYNC_PLUG : WRITE, bio);
	ASSERT(!bio_flagged(bio, BIO_EOPNOTSUPP));
	bio_put(bio);
}
STATIC struct bio *
xfs_alloc_ioend_bio(
	struct buffer_head	*bh)
{
	struct bio		*bio;
	int			nvecs = bio_get_nr_vecs(bh->b_bdev);

	do {
		bio = bio_alloc(GFP_NOIO, nvecs);
		nvecs >>= 1;
	} while (!bio);

	ASSERT(bio->bi_private == NULL);
	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;
	bio_get(bio);
	return bio;
}
STATIC void
xfs_start_buffer_writeback(
	struct buffer_head	*bh)
{
	ASSERT(buffer_mapped(bh));
	ASSERT(buffer_locked(bh));
	ASSERT(!buffer_delay(bh));
	ASSERT(!buffer_unwritten(bh));

	mark_buffer_async_write(bh);
	set_buffer_uptodate(bh);
	clear_buffer_dirty(bh);
}
STATIC void
xfs_start_page_writeback(
	struct page		*page,
	int			clear_dirty,
	int			buffers)
{
	ASSERT(PageLocked(page));
	ASSERT(!PageWriteback(page));
	if (clear_dirty)
		clear_page_dirty_for_io(page);
	set_page_writeback(page);
	unlock_page(page);
	/* If no buffers on the page are to be written, finish it here */
	if (!buffers)
		end_page_writeback(page);
}
static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
{
	return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
}
/*
 * Submit all of the bios for all of the ioends we have saved up, covering the
 * initial writepage page and also any probed pages.
 *
 * Because we may have multiple ioends spanning a page, we need to start
 * writeback on all the buffers before we submit them for I/O. If we mark the
 * buffers as we go, we can end up with a page that only has some buffers
 * marked async write, and I/O completion on it can occur before we mark the
 * remaining buffers async write.
 *
 * The end result of this is that we trip a bug in end_page_writeback() because
 * we call it twice for the one page as the code in end_buffer_async_write()
 * assumes that all buffers on the page are started at the same time.
 *
 * The fix is two passes across the ioend list - one to start writeback on the
 * buffer_heads, and then submit them for I/O on the second pass.
 */
STATIC void
xfs_submit_ioend(
	struct writeback_control *wbc,
	xfs_ioend_t		*ioend)
{
	xfs_ioend_t		*head = ioend;
	xfs_ioend_t		*next;
	struct buffer_head	*bh;
	struct bio		*bio;
	sector_t		lastblock = 0;

	/* Pass 1 - start writeback */
	do {
		next = ioend->io_list;
		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {
			xfs_start_buffer_writeback(bh);
		}
	} while ((ioend = next) != NULL);

	/* Pass 2 - submit I/O */
	ioend = head;
	do {
		next = ioend->io_list;
		bio = NULL;

		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {

			if (!bio) {
 retry:
				bio = xfs_alloc_ioend_bio(bh);
			} else if (bh->b_blocknr != lastblock + 1) {
				xfs_submit_ioend_bio(wbc, ioend, bio);
				goto retry;
			}

			if (bio_add_buffer(bio, bh) != bh->b_size) {
				xfs_submit_ioend_bio(wbc, ioend, bio);
				goto retry;
			}

			lastblock = bh->b_blocknr;
		}
		if (bio)
			xfs_submit_ioend_bio(wbc, ioend, bio);
		xfs_finish_ioend(ioend, 0);
	} while ((ioend = next) != NULL);
}
/*
 * Cancel submission of all buffer_heads so far in this endio.
 * Toss the endio too.  Only ever called for the initial page
 * in a writepage request, so only ever one page.
 */
STATIC void
xfs_cancel_ioend(
	xfs_ioend_t		*ioend)
{
	xfs_ioend_t		*next;
	struct buffer_head	*bh, *next_bh;

	do {
		next = ioend->io_list;
		bh = ioend->io_buffer_head;
		do {
			next_bh = bh->b_private;
			clear_buffer_async_write(bh);
			unlock_buffer(bh);
		} while ((bh = next_bh) != NULL);

		xfs_ioend_wake(XFS_I(ioend->io_inode));
		mempool_free(ioend, xfs_ioend_pool);
	} while ((ioend = next) != NULL);
}
/*
 * Test to see if we've been building up a completion structure for
 * earlier buffers -- if so, we try to append to this ioend if we
 * can, otherwise we finish off any current ioend and start another.
 * Return true if we've finished the given ioend.
 */
STATIC void
xfs_add_to_ioend(
	struct inode		*inode,
	struct buffer_head	*bh,
	xfs_off_t		offset,
	unsigned int		type,
	xfs_ioend_t		**result,
	int			need_ioend)
{
	xfs_ioend_t		*ioend = *result;

	if (!ioend || need_ioend || type != ioend->io_type) {
		xfs_ioend_t	*previous = *result;

		ioend = xfs_alloc_ioend(inode, type);
		ioend->io_offset = offset;
		ioend->io_buffer_head = bh;
		ioend->io_buffer_tail = bh;
		if (previous)
			previous->io_list = ioend;
		*result = ioend;
	} else {
		ioend->io_buffer_tail->b_private = bh;
		ioend->io_buffer_tail = bh;
	}

	bh->b_private = NULL;
	ioend->io_size += bh->b_size;
}
STATIC void
xfs_map_buffer(
	struct inode		*inode,
	struct buffer_head	*bh,
	xfs_iomap_t		*mp,
	xfs_off_t		offset)
{
	sector_t		bn;
	struct xfs_mount	*m = XFS_I(inode)->i_mount;
	xfs_off_t		iomap_offset = XFS_FSB_TO_B(m, mp->iomap_offset);

	ASSERT(mp->iomap_bn != IOMAP_DADDR_NULL);

	bn = (mp->iomap_bn >> (inode->i_blkbits - BBSHIFT)) +
	      ((offset - iomap_offset) >> inode->i_blkbits);

	ASSERT(bn || XFS_IS_REALTIME_INODE(XFS_I(inode)));

	bh->b_blocknr = bn;
	set_buffer_mapped(bh);
}
STATIC void
xfs_map_at_offset(
	struct inode		*inode,
	struct buffer_head	*bh,
	xfs_iomap_t		*iomapp,
	xfs_off_t		offset)
{
	ASSERT(!(iomapp->iomap_flags & IOMAP_HOLE));
	ASSERT(!(iomapp->iomap_flags & IOMAP_DELAY));

	lock_buffer(bh);
	xfs_map_buffer(inode, bh, iomapp, offset);
	bh->b_bdev = xfs_find_bdev_for_inode(inode);
	set_buffer_mapped(bh);
	clear_buffer_delay(bh);
	clear_buffer_unwritten(bh);
}
/*
 * Look for a page at index that is suitable for clustering.
 */
STATIC size_t
xfs_probe_page(
	struct page		*page,
	unsigned int		pg_offset,
	int			mapped)
{
	int			ret = 0;

	if (PageWriteback(page))
		return 0;

	if (page->mapping && PageDirty(page)) {
		if (page_has_buffers(page)) {
			struct buffer_head	*bh, *head;

			bh = head = page_buffers(page);
			do {
				if (!buffer_uptodate(bh))
					break;
				if (mapped != buffer_mapped(bh))
					break;
				ret += bh->b_size;
				if (ret >= pg_offset)
					break;
			} while ((bh = bh->b_this_page) != head);
		} else
			ret = mapped ? 0 : PAGE_CACHE_SIZE;
	}

	return ret;
}
STATIC size_t
xfs_probe_cluster(
	struct inode		*inode,
	struct page		*startpage,
	struct buffer_head	*bh,
	struct buffer_head	*head,
	int			mapped)
{
	struct pagevec		pvec;
	pgoff_t			tindex, tlast, tloff;
	size_t			total = 0;
	int			done = 0, i;

	/* First sum forwards in this page */
	do {
		if (!buffer_uptodate(bh) || (mapped != buffer_mapped(bh)))
			return total;
		total += bh->b_size;
	} while ((bh = bh->b_this_page) != head);

	/* if we reached the end of the page, sum forwards in following pages */
	tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
	tindex = startpage->index + 1;

	/* Prune this back to avoid pathological behavior */
	tloff = min(tlast, startpage->index + 64);

	pagevec_init(&pvec, 0);
	while (!done && tindex <= tloff) {
		unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);

		if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
			break;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			size_t pg_offset, pg_len = 0;

			if (tindex == tlast) {
				pg_offset =
				    i_size_read(inode) & (PAGE_CACHE_SIZE - 1);
				if (!pg_offset) {
					done = 1;
					break;
				}
			} else
				pg_offset = PAGE_CACHE_SIZE;

			if (page->index == tindex && trylock_page(page)) {
				pg_len = xfs_probe_page(page, pg_offset, mapped);
				unlock_page(page);
			}

			if (!pg_len) {
				done = 1;
				break;
			}

			total += pg_len;
			tindex++;
		}

		pagevec_release(&pvec);
		cond_resched();
	}

	return total;
}
/*
 * Test if a given page is suitable for writing as part of an unwritten
 * or delayed allocate extent.
 */
STATIC int
xfs_is_delayed_page(
	struct page		*page,
	unsigned int		type)
{
	if (PageWriteback(page))
		return 0;

	if (page->mapping && page_has_buffers(page)) {
		struct buffer_head	*bh, *head;
		int			acceptable = 0;

		bh = head = page_buffers(page);
		do {
			if (buffer_unwritten(bh))
				acceptable = (type == IOMAP_UNWRITTEN);
			else if (buffer_delay(bh))
				acceptable = (type == IOMAP_DELAY);
			else if (buffer_dirty(bh) && buffer_mapped(bh))
				acceptable = (type == IOMAP_NEW);
			else
				break;
		} while ((bh = bh->b_this_page) != head);

		if (acceptable)
			return 1;
	}

	return 0;
}
/*
 * Allocate & map buffers for page given the extent map. Write it out.
 * Except for the original page of a writepage, this is called on
 * delalloc/unwritten pages only, for the original page it is possible
 * that the page has no mapping at all.
 */
STATIC int
xfs_convert_page(
	struct inode		*inode,
	struct page		*page,
	loff_t			tindex,
	xfs_iomap_t		*mp,
	xfs_ioend_t		**ioendp,
	struct writeback_control *wbc,
	int			startio,
	int			all_bh)
{
	struct buffer_head	*bh, *head;
	xfs_off_t		end_offset;
	unsigned long		p_offset;
	unsigned int		type;
	int			len, page_dirty;
	int			count = 0, done = 0, uptodate = 1;
	xfs_off_t		offset = page_offset(page);

	if (page->index != tindex)
		goto fail;
	if (!trylock_page(page))
		goto fail;
	if (PageWriteback(page))
		goto fail_unlock_page;
	if (page->mapping != inode->i_mapping)
		goto fail_unlock_page;
	if (!xfs_is_delayed_page(page, (*ioendp)->io_type))
		goto fail_unlock_page;

	/*
	 * page_dirty is initially a count of buffers on the page before
	 * EOF and is decremented as we move each into a cleanable state.
	 *
	 * Derivation:
	 *
	 * End offset is the highest offset that this page should represent.
	 * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
	 * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
	 * hence give us the correct page_dirty count. On any other page,
	 * it will be zero and in that case we need page_dirty to be the
	 * count of buffers on the page.
	 */
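	/*
	 * For example, with 4 KB pages and 1 KB blocks: on an interior page
	 * end_offset is page aligned, so (end_offset & (PAGE_CACHE_SIZE - 1))
	 * is 0, p_offset becomes PAGE_CACHE_SIZE and page_dirty is 4 (all
	 * buffers).  On the last page of a file that ends 1536 bytes into the
	 * page, p_offset is 1536 rounded up to 2048, so page_dirty is 2 -
	 * only the two buffers before EOF are counted.
	 */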
	end_offset = min_t(unsigned long long,
			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
			i_size_read(inode));

	len = 1 << inode->i_blkbits;
	p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
					PAGE_CACHE_SIZE);
	p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
	page_dirty = p_offset / len;

	bh = head = page_buffers(page);
	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;
		if (!(PageUptodate(page) || buffer_uptodate(bh))) {
			done = 1;
			continue;
		}

		if (buffer_unwritten(bh) || buffer_delay(bh)) {
			if (buffer_unwritten(bh))
				type = IOMAP_UNWRITTEN;
			else
				type = IOMAP_DELAY;

			if (!xfs_iomap_valid(inode, mp, offset)) {
				done = 1;
				continue;
			}

			ASSERT(!(mp->iomap_flags & IOMAP_HOLE));
			ASSERT(!(mp->iomap_flags & IOMAP_DELAY));

			xfs_map_at_offset(inode, bh, mp, offset);
			if (startio) {
				xfs_add_to_ioend(inode, bh, offset,
						type, ioendp, done);
			} else {
				set_buffer_dirty(bh);
				unlock_buffer(bh);
				mark_buffer_dirty(bh);
			}
			page_dirty--;
			count++;
		} else {
			type = IOMAP_NEW;
			if (buffer_mapped(bh) && all_bh && startio) {
				lock_buffer(bh);
				xfs_add_to_ioend(inode, bh, offset,
						type, ioendp, done);
				count++;
				page_dirty--;
			} else {
				done = 1;
			}
		}
	} while (offset += len, (bh = bh->b_this_page) != head);

	if (uptodate && bh == head)
		SetPageUptodate(page);

	if (startio) {
		if (count) {
			wbc->nr_to_write--;
			if (wbc->nr_to_write <= 0)
				done = 1;
		}
		xfs_start_page_writeback(page, !page_dirty, count);
	}

	return done;
 fail_unlock_page:
	unlock_page(page);
 fail:
	return 1;
}
/*
 * Convert & write out a cluster of pages in the same extent as defined
 * by mp and following the start page.
 */
STATIC void
xfs_cluster_write(
	struct inode		*inode,
	pgoff_t			tindex,
	xfs_iomap_t		*iomapp,
	xfs_ioend_t		**ioendp,
	struct writeback_control *wbc,
	int			startio,
	int			all_bh,
	pgoff_t			tlast)
{
	struct pagevec		pvec;
	int			done = 0, i;

	pagevec_init(&pvec, 0);
	while (!done && tindex <= tlast) {
		unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);

		if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
			break;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			done = xfs_convert_page(inode, pvec.pages[i], tindex++,
					iomapp, ioendp, wbc, startio, all_bh);
			if (done)
				break;
		}

		pagevec_release(&pvec);
		cond_resched();
	}
}
STATIC void
xfs_vm_invalidatepage(
	struct page		*page,
	unsigned long		offset)
{
	trace_xfs_invalidatepage(page->mapping->host, page, offset);
	block_invalidatepage(page, offset);
}
/*
 * If the page has delalloc buffers on it, we need to punch them out before we
 * invalidate the page. If we don't, we leave a stale delalloc mapping on the
 * inode that can trip a BUG() in xfs_get_blocks() later on if a direct IO read
 * is done on that same region - the delalloc extent is returned when none is
 * supposed to be there.
 *
 * We prevent this by truncating away the delalloc regions on the page before
 * invalidating it. Because they are delalloc, we can do this without needing a
 * transaction. Indeed - if we get ENOSPC errors, we have to be able to do this
 * truncation without a transaction as there is no space left for block
 * reservation (typically why we see an ENOSPC in writeback).
 *
 * This is not a performance critical path, so for now just do the punching a
 * buffer head at a time.
 */
STATIC void
xfs_aops_discard_page(
	struct page		*page)
{
	struct inode		*inode = page->mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct buffer_head	*bh, *head;
	loff_t			offset = page_offset(page);
	ssize_t			len = 1 << inode->i_blkbits;

	if (!xfs_is_delayed_page(page, IOMAP_DELAY))
		goto out_invalidate;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		goto out_invalidate;

	xfs_fs_cmn_err(CE_ALERT, ip->i_mount,
		"page discard on page %p, inode 0x%llx, offset %llu.",
			page, ip->i_ino, offset);

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	bh = head = page_buffers(page);
	do {
		int		done;
		xfs_fileoff_t	offset_fsb;
		xfs_bmbt_irec_t	imap;
		int		nimaps = 1;
		int		error;
		xfs_fsblock_t	firstblock;
		xfs_bmap_free_t	flist;

		if (!buffer_delay(bh))
			goto next_buffer;

		offset_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);

		/*
		 * Map the range first and check that it is a delalloc extent
		 * before trying to unmap the range. Otherwise we will be
		 * trying to remove a real extent (which requires a
		 * transaction) or a hole, which is probably a bad idea...
		 */
		error = xfs_bmapi(NULL, ip, offset_fsb, 1,
				XFS_BMAPI_ENTIRE,  NULL, 0, &imap,
				&nimaps, NULL, NULL);

		if (error) {
			/* something screwed, just bail */
			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
				xfs_fs_cmn_err(CE_ALERT, ip->i_mount,
				"page discard failed delalloc mapping lookup.");
			}
			break;
		}
		if (!nimaps) {
			/* nothing there */
			goto next_buffer;
		}
		if (imap.br_startblock != DELAYSTARTBLOCK) {
			/* been converted, ignore */
			goto next_buffer;
		}
		WARN_ON(imap.br_blockcount == 0);

		/*
		 * Note: while we initialise the firstblock/flist pair, they
		 * should never be used because blocks should never be
		 * allocated or freed for a delalloc extent and hence we don't
		 * need to cancel or finish them after the xfs_bunmapi() call.
		 */
		xfs_bmap_init(&flist, &firstblock);
		error = xfs_bunmapi(NULL, ip, offset_fsb, 1, 0, 1, &firstblock,
					&flist, NULL, &done);

		ASSERT(!flist.xbf_count && !flist.xbf_first);
		if (error) {
			/* something screwed, just bail */
			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
				xfs_fs_cmn_err(CE_ALERT, ip->i_mount,
				"page discard unable to remove delalloc mapping.");
			}
			break;
		}
next_buffer:
		offset += len;

	} while ((bh = bh->b_this_page) != head);

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
out_invalidate:
	xfs_vm_invalidatepage(page, 0);
	return;
}
/*
 * Calling this without startio set means we are being asked to make a dirty
 * page ready for freeing its buffers.  When called with startio set then
 * we are coming from writepage.
 *
 * When called with startio set it is important that we write the WHOLE
 * page if possible.
 * The bh->b_state's cannot know if any of the blocks or which block for
 * that matter are dirty due to mmap writes, and therefore bh uptodate is
 * only valid if the page itself isn't completely uptodate.  Some layers
 * may clear the page dirty flag prior to calling write page, under the
 * assumption the entire page will be written out; by not writing out the
 * whole page the page can be reused before all valid dirty data is
 * written out.  Note: in the case of a page that has been dirtied by
 * mmap write but only partially set up by block_prepare_write, the
 * bh->b_state's will not agree and only the ones set up by BPW/BCW will
 * have valid state, thus the whole page must be written out.
 */
STATIC int
xfs_page_state_convert(
	struct inode	*inode,
	struct page	*page,
	struct writeback_control *wbc,
	int		startio,
	int		unmapped) /* also implies page uptodate */
{
	struct buffer_head	*bh, *head;
	xfs_iomap_t		iomap;
	xfs_ioend_t		*ioend = NULL, *iohead = NULL;
	loff_t			offset;
	unsigned long		p_offset = 0;
	unsigned int		type;
	__uint64_t		end_offset;
	pgoff_t			end_index, last_index, tlast;
	ssize_t			size, len;
	int			flags, err, iomap_valid = 0, uptodate = 1;
	int			page_dirty, count = 0;
	int			trylock = 0;
	int			all_bh = unmapped;

	if (startio) {
		if (wbc->sync_mode == WB_SYNC_NONE && wbc->nonblocking)
			trylock |= BMAPI_TRYLOCK;
	}

	/* Is this page beyond the end of the file? */
	offset = i_size_read(inode);
	end_index = offset >> PAGE_CACHE_SHIFT;
	last_index = (offset - 1) >> PAGE_CACHE_SHIFT;
	if (page->index >= end_index) {
		if ((page->index >= end_index + 1) ||
		    !(i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
			if (startio)
				unlock_page(page);
			return 0;
		}
	}

	/*
	 * page_dirty is initially a count of buffers on the page before
	 * EOF and is decremented as we move each into a cleanable state.
	 *
	 * Derivation:
	 *
	 * End offset is the highest offset that this page should represent.
	 * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
	 * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
	 * hence give us the correct page_dirty count. On any other page,
	 * it will be zero and in that case we need page_dirty to be the
	 * count of buffers on the page.
	 */
	end_offset = min_t(unsigned long long,
			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT, offset);
	len = 1 << inode->i_blkbits;
	p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
					PAGE_CACHE_SIZE);
	p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
	page_dirty = p_offset / len;

	bh = head = page_buffers(page);
	offset = page_offset(page);
	flags = BMAPI_READ;
	type = IOMAP_NEW;
	/* TODO: cleanup count and page_dirty */

	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;
		if (!(PageUptodate(page) || buffer_uptodate(bh)) && !startio) {
			/*
			 * the iomap is actually still valid, but the ioend
			 * isn't.  shouldn't happen too often.
			 */
			iomap_valid = 0;
			continue;
		}

		if (iomap_valid)
			iomap_valid = xfs_iomap_valid(inode, &iomap, offset);

		/*
		 * First case, map an unwritten extent and prepare for
		 * extent state conversion transaction on completion.
		 *
		 * Second case, allocate space for a delalloc buffer.
		 * We can return EAGAIN here in the release page case.
		 *
		 * Third case, an unmapped buffer was found, and we are
		 * in a path where we need to write the whole page out.
		 */
		if (buffer_unwritten(bh) || buffer_delay(bh) ||
		    ((buffer_uptodate(bh) || PageUptodate(page)) &&
		     !buffer_mapped(bh) && (unmapped || startio))) {
			int new_ioend = 0;

			/*
			 * Make sure we don't use a read-only iomap
			 */
			if (flags == BMAPI_READ)
				iomap_valid = 0;

			if (buffer_unwritten(bh)) {
				type = IOMAP_UNWRITTEN;
				flags = BMAPI_WRITE | BMAPI_IGNSTATE;
			} else if (buffer_delay(bh)) {
				type = IOMAP_DELAY;
				flags = BMAPI_ALLOCATE | trylock;
			} else {
				type = IOMAP_NEW;
				flags = BMAPI_WRITE | BMAPI_MMAP;
			}

			if (!iomap_valid) {
				/*
				 * if we didn't have a valid mapping then we
				 * need to ensure that we put the new mapping
				 * in a new ioend structure. This needs to be
				 * done to ensure that the ioends correctly
				 * reflect the block mappings at io completion
				 * for unwritten extent conversion.
				 */
				new_ioend = 1;
				if (type == IOMAP_NEW) {
					size = xfs_probe_cluster(inode,
							page, bh, head, 0);
				} else {
					size = len;
				}

				err = xfs_map_blocks(inode, offset, size,
						&iomap, flags);
				if (err)
					goto error;
				iomap_valid = xfs_iomap_valid(inode, &iomap, offset);
			}
			if (iomap_valid) {
				xfs_map_at_offset(inode, bh, &iomap, offset);
				if (startio) {
					xfs_add_to_ioend(inode, bh, offset,
							type, &ioend,
							new_ioend);
				} else {
					set_buffer_dirty(bh);
					unlock_buffer(bh);
					mark_buffer_dirty(bh);
				}
				page_dirty--;
				count++;
			}
		} else if (buffer_uptodate(bh) && startio) {
			/*
			 * we got here because the buffer is already mapped.
			 * That means it must already have extents allocated
			 * underneath it. Map the extent by reading it.
			 */
			if (!iomap_valid || flags != BMAPI_READ) {
				flags = BMAPI_READ;
				size = xfs_probe_cluster(inode, page, bh,
								head, 1);
				err = xfs_map_blocks(inode, offset, size,
						&iomap, flags);
				if (err)
					goto error;
				iomap_valid = xfs_iomap_valid(inode, &iomap, offset);
			}

			/*
			 * We set the type to IOMAP_NEW in case we are doing a
			 * small write at EOF that is extending the file but
			 * without needing an allocation. We need to update the
			 * file size on I/O completion in this case so it is
			 * the same case as having just allocated a new extent
			 * that we are writing into for the first time.
			 */
			type = IOMAP_NEW;
			if (trylock_buffer(bh)) {
				ASSERT(buffer_mapped(bh));
				if (iomap_valid)
					all_bh = 1;
				xfs_add_to_ioend(inode, bh, offset, type,
						&ioend, !iomap_valid);
				page_dirty--;
				count++;
			} else {
				iomap_valid = 0;
			}
		} else if ((buffer_uptodate(bh) || PageUptodate(page)) &&
			   (unmapped || startio)) {
			iomap_valid = 0;
		}

		if (!iohead)
			iohead = ioend;

	} while (offset += len, ((bh = bh->b_this_page) != head));

	if (uptodate && bh == head)
		SetPageUptodate(page);

	if (startio)
		xfs_start_page_writeback(page, 1, count);

	if (ioend && iomap_valid) {
		struct xfs_mount	*m = XFS_I(inode)->i_mount;
		xfs_off_t		iomap_offset = XFS_FSB_TO_B(m, iomap.iomap_offset);
		xfs_off_t		iomap_bsize = XFS_FSB_TO_B(m, iomap.iomap_bsize);

		offset = (iomap_offset + iomap_bsize - 1) >>
					PAGE_CACHE_SHIFT;
		tlast = min_t(pgoff_t, offset, last_index);
		xfs_cluster_write(inode, page->index + 1, &iomap, &ioend,
					wbc, startio, all_bh, tlast);
	}

	if (iohead)
		xfs_submit_ioend(wbc, iohead);

	return page_dirty;

error:
	if (iohead)
		xfs_cancel_ioend(iohead);

	/*
	 * If it's delalloc and we have nowhere to put it,
	 * throw it away, unless the lower layers told
	 * us to try again.
	 */
	if (err != -EAGAIN) {
		if (!unmapped)
			xfs_aops_discard_page(page);
		ClearPageUptodate(page);
	}
	return err;
}
/*
 * writepage: Called from one of two places:
 *
 * 1. we are flushing a delalloc buffer head.
 *
 * 2. we are writing out a dirty page. Typically the page dirty
 *    state is cleared before we get here. In this case it is
 *    conceivable we have no buffer heads.
 *
 * For delalloc space on the page we need to allocate space and
 * flush it. For unmapped buffer heads on the page we should
 * allocate space if the page is uptodate. For any other dirty
 * buffer heads on the page we should flush them.
 *
 * If we detect that a transaction would be required to flush
 * the page, we have to check the process flags first, if we
 * are already in a transaction or disk I/O during allocations
 * is off, we need to fail the writepage and redirty the page.
 */
STATIC int
xfs_vm_writepage(
	struct page		*page,
	struct writeback_control *wbc)
{
	int			error;
	int			need_trans;
	int			delalloc, unmapped, unwritten;
	struct inode		*inode = page->mapping->host;

	trace_xfs_writepage(inode, page, 0);

	/*
	 * We need a transaction if:
	 *  1. There are delalloc buffers on the page
	 *  2. The page is uptodate and we have unmapped buffers
	 *  3. The page is uptodate and we have no buffers
	 *  4. There are unwritten buffers on the page
	 */
	if (!page_has_buffers(page)) {
		unmapped = 1;
		need_trans = 1;
	} else {
		xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
		if (!PageUptodate(page))
			unmapped = 0;
		need_trans = delalloc + unmapped + unwritten;
	}

	/*
	 * If we need a transaction and the process flags say
	 * we are already in a transaction, or no IO is allowed
	 * then mark the page dirty again and leave the page
	 * as is.
	 */
	if (current_test_flags(PF_FSTRANS) && need_trans)
		goto out_fail;

	/*
	 * Delay hooking up buffer heads until we have
	 * made our go/no-go decision.
	 */
	if (!page_has_buffers(page))
		create_empty_buffers(page, 1 << inode->i_blkbits, 0);

	/*
	 *  VM calculation for nr_to_write seems off.  Bump it way
	 *  up, this gets simple streaming writes zippy again.
	 *  To be reviewed again after Jens' writeback changes.
	 */
	wbc->nr_to_write *= 4;

	/*
	 * Convert delayed allocate, unwritten or unmapped space
	 * to real space and flush out to disk.
	 */
	error = xfs_page_state_convert(inode, page, wbc, 1, unmapped);
	if (error == -EAGAIN)
		goto out_fail;
	if (unlikely(error < 0))
		goto out_unlock;

	return 0;

out_fail:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
out_unlock:
	unlock_page(page);
	return error;
}
STATIC int
xfs_vm_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
	return generic_writepages(mapping, wbc);
}
/*
 * Called to move a page into cleanable state - and from there
 * to be released. Possibly the page is already clean. We always
 * have buffer heads in this call.
 *
 * Returns 0 if the page is ok to release, 1 otherwise.
 *
 * Possible scenarios are:
 *
 * 1. We are being called to release a page which has been written
 *    to via regular I/O. buffer heads will be dirty and possibly
 *    delalloc. If no delalloc buffer heads in this case then we
 *    can just return zero.
 *
 * 2. We are called to release a page which has been written via
 *    mmap, all we need to do is ensure there is no delalloc
 *    state in the buffer heads, if not we can let the caller
 *    free them and we should come back later via writepage.
 */
STATIC int
xfs_vm_releasepage(
	struct page		*page,
	gfp_t			gfp_mask)
{
	struct inode		*inode = page->mapping->host;
	int			dirty, delalloc, unmapped, unwritten;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 1,
	};

	trace_xfs_releasepage(inode, page, 0);

	if (!page_has_buffers(page))
		return 0;

	xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
	if (!delalloc && !unwritten)
		goto free_buffers;

	if (!(gfp_mask & __GFP_FS))
		return 0;

	/* If we are already inside a transaction or the thread cannot
	 * do I/O, we cannot release this page.
	 */
	if (current_test_flags(PF_FSTRANS))
		return 0;

	/*
	 * Convert delalloc space to real space, do not flush the
	 * data out to disk, that will be done by the caller.
	 * Never need to allocate space here - we will always
	 * come back to writepage in that case.
	 */
	dirty = xfs_page_state_convert(inode, page, &wbc, 0, 0);
	if (dirty == 0 && !unwritten)
		goto free_buffers;
	return 0;

free_buffers:
	return try_to_free_buffers(page);
}
STATIC int
__xfs_get_blocks(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create,
	int			direct,
	bmapi_flags_t		flags)
{
	xfs_iomap_t		iomap;
	xfs_off_t		offset;
	ssize_t			size;
	int			niomap = 1;
	int			error;

	offset = (xfs_off_t)iblock << inode->i_blkbits;
	ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
	size = bh_result->b_size;

	if (!create && direct && offset >= i_size_read(inode))
		return 0;

	error = xfs_iomap(XFS_I(inode), offset, size,
			     create ? flags : BMAPI_READ, &iomap, &niomap);
	if (error)
		return -error;
	if (niomap == 0)
		return 0;

	if (iomap.iomap_bn != IOMAP_DADDR_NULL) {
		/*
		 * For unwritten extents do not report a disk address on
		 * the read case (treat as if we're reading into a hole).
		 */
		if (create || !(iomap.iomap_flags & IOMAP_UNWRITTEN))
			xfs_map_buffer(inode, bh_result, &iomap, offset);
		if (create && (iomap.iomap_flags & IOMAP_UNWRITTEN)) {
			if (direct)
				bh_result->b_private = inode;
			set_buffer_unwritten(bh_result);
		}
	}

	/*
	 * If this is a realtime file, data may be on a different device
	 * to that pointed to from the buffer_head b_bdev currently.
	 */
	bh_result->b_bdev = xfs_find_bdev_for_inode(inode);

	/*
	 * If we previously allocated a block out beyond eof and we are now
	 * coming back to use it then we will need to flag it as new even if it
	 * has a disk address.
	 *
	 * With sub-block writes into unwritten extents we also need to mark
	 * the buffer as new so that the unwritten parts of the buffer gets
	 * correctly zeroed.
	 */
	if (create &&
	    ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
	     (offset >= i_size_read(inode)) ||
	     (iomap.iomap_flags & (IOMAP_NEW|IOMAP_UNWRITTEN))))
		set_buffer_new(bh_result);

	if (iomap.iomap_flags & IOMAP_DELAY) {
		BUG_ON(direct);
		if (create) {
			set_buffer_uptodate(bh_result);
			set_buffer_mapped(bh_result);
			set_buffer_delay(bh_result);
		}
	}

	if (direct || size > (1 << inode->i_blkbits)) {
		struct xfs_mount	*mp = XFS_I(inode)->i_mount;
		xfs_off_t		iomap_offset = XFS_FSB_TO_B(mp, iomap.iomap_offset);
		xfs_off_t		iomap_delta = offset - iomap_offset;
		xfs_off_t		iomap_bsize = XFS_FSB_TO_B(mp, iomap.iomap_bsize);

		ASSERT(iomap_bsize - iomap_delta > 0);
		offset = min_t(xfs_off_t,
				iomap_bsize - iomap_delta, size);
		bh_result->b_size = (ssize_t)min_t(xfs_off_t, LONG_MAX, offset);
	}

	return 0;
}
int
xfs_get_blocks(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return __xfs_get_blocks(inode, iblock,
				bh_result, create, 0, BMAPI_WRITE);
}

STATIC int
xfs_get_blocks_direct(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return __xfs_get_blocks(inode, iblock,
				bh_result, create, 1, BMAPI_WRITE|BMAPI_DIRECT);
}
STATIC void
xfs_end_io_direct(
	struct kiocb	*iocb,
	loff_t		offset,
	ssize_t		size,
	void		*private)
{
	xfs_ioend_t	*ioend = iocb->private;

	/*
	 * Non-NULL private data means we need to issue a transaction to
	 * convert a range from unwritten to written extents.  This needs
	 * to happen from process context but aio+dio I/O completion
	 * happens from irq context so we need to defer it to a workqueue.
	 * This is not necessary for synchronous direct I/O, but we do
	 * it anyway to keep the code uniform and simpler.
	 *
	 * Well, if only it were that simple. Because synchronous direct I/O
	 * requires extent conversion to occur *before* we return to userspace,
	 * we have to wait for extent conversion to complete. Look at the
	 * iocb that has been passed to us to determine if this is AIO or
	 * not. If it is synchronous, tell xfs_finish_ioend() to kick the
	 * workqueue and wait for it to complete.
	 *
	 * The core direct I/O code might be changed to always call the
	 * completion handler in the future, in which case all this can
	 * go away.
	 */
	ioend->io_offset = offset;
	ioend->io_size = size;
	if (ioend->io_type == IOMAP_READ) {
		xfs_finish_ioend(ioend, 0);
	} else if (private && size > 0) {
		xfs_finish_ioend(ioend, is_sync_kiocb(iocb));
	} else {
		/*
		 * A direct I/O write ioend starts its life in unwritten
		 * state in case they map an unwritten extent.  This write
		 * didn't map an unwritten extent so switch its completion
		 * handler.
		 */
		ioend->io_type = IOMAP_NEW;
		xfs_finish_ioend(ioend, 0);
	}

	/*
	 * blockdev_direct_IO can return an error even after the I/O
	 * completion handler was called.  Thus we need to protect
	 * against double-freeing.
	 */
	iocb->private = NULL;
}
STATIC ssize_t
xfs_vm_direct_IO(
	int			rw,
	struct kiocb		*iocb,
	const struct iovec	*iov,
	loff_t			offset,
	unsigned long		nr_segs)
{
	struct file	*file = iocb->ki_filp;
	struct inode	*inode = file->f_mapping->host;
	struct block_device *bdev;
	ssize_t		ret;

	bdev = xfs_find_bdev_for_inode(inode);

	iocb->private = xfs_alloc_ioend(inode, rw == WRITE ?
					IOMAP_UNWRITTEN : IOMAP_READ);

	ret = blockdev_direct_IO_no_locking(rw, iocb, inode, bdev, iov,
					    offset, nr_segs,
					    xfs_get_blocks_direct,
					    xfs_end_io_direct);

	if (unlikely(ret != -EIOCBQUEUED && iocb->private))
		xfs_destroy_ioend(iocb->private);
	return ret;
}
STATIC int
xfs_vm_write_begin(
	struct file		*file,
	struct address_space	*mapping,
	loff_t			pos,
	unsigned		len,
	unsigned		flags,
	struct page		**pagep,
	void			**fsdata)
{
	*pagep = NULL;
	return block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
								xfs_get_blocks);
}
STATIC sector_t
xfs_vm_bmap(
	struct address_space	*mapping,
	sector_t		block)
{
	struct inode		*inode = (struct inode *)mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);

	xfs_itrace_entry(XFS_I(inode));
	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	xfs_flush_pages(ip, (xfs_off_t)0, -1, 0, FI_REMAPF);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
	return generic_block_bmap(mapping, block, xfs_get_blocks);
}
STATIC int
xfs_vm_readpage(
	struct file		*unused,
	struct page		*page)
{
	return mpage_readpage(page, xfs_get_blocks);
}

STATIC int
xfs_vm_readpages(
	struct file		*unused,
	struct address_space	*mapping,
	struct list_head	*pages,
	unsigned		nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
}
const struct address_space_operations xfs_address_space_operations = {
	.readpage		= xfs_vm_readpage,
	.readpages		= xfs_vm_readpages,
	.writepage		= xfs_vm_writepage,
	.writepages		= xfs_vm_writepages,
	.sync_page		= block_sync_page,
	.releasepage		= xfs_vm_releasepage,
	.invalidatepage		= xfs_vm_invalidatepage,
	.write_begin		= xfs_vm_write_begin,
	.write_end		= generic_write_end,
	.bmap			= xfs_vm_bmap,
	.direct_IO		= xfs_vm_direct_IO,
	.migratepage		= buffer_migrate_page,
	.is_partially_uptodate	= block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
};