// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (c) 2016-2018 Christoph Hellwig.
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/iomap.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/migrate.h>
#include <linux/mm_inline.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/dax.h>
#include <linux/sched/signal.h>
/*
 * Execute an iomap write on a segment of the mapping that spans a
 * contiguous range of pages that have identical block mapping state.
 *
 * This avoids the need to map pages individually, do individual allocations
 * for each page and most importantly avoid the need for filesystem specific
 * locking per page. Instead, all the operations are amortised over the entire
 * range of pages. It is assumed that the filesystems will lock whatever
 * resources they require in the iomap_begin call, and release them in the
 * iomap_end call.
 */
loff_t
iomap_apply(struct inode *inode, loff_t pos, loff_t length, unsigned flags,
		const struct iomap_ops *ops, void *data, iomap_actor_t actor)
{
	struct iomap iomap = { 0 };
	loff_t written = 0, ret;

	/*
	 * Need to map a range from start position for length bytes. This can
	 * span multiple pages - it is only guaranteed to return a range of a
	 * single type of pages (e.g. all into a hole, all mapped or all
	 * unwritten). Failure at this point has nothing to undo.
	 *
	 * If allocation is required for this range, reserve the space now so
	 * that the allocation is guaranteed to succeed later on. Once we copy
	 * the data into the page cache pages, then we cannot fail otherwise we
	 * expose transient stale data. If the reserve fails, we can safely
	 * back out at this point as there is nothing to undo.
	 */
	ret = ops->iomap_begin(inode, pos, length, flags, &iomap);
	if (ret)
		return ret;
	if (WARN_ON(iomap.offset > pos))
		return -EIO;
	if (WARN_ON(iomap.length == 0))
		return -EIO;

	/*
	 * Cut down the length to the one actually provided by the filesystem,
	 * as it might not be able to give us the whole size that we requested.
	 */
	if (iomap.offset + iomap.length < pos + length)
		length = iomap.offset + iomap.length - pos;

	/*
	 * Now that we have guaranteed that the space allocation will succeed,
	 * we can do the copy-in page by page without having to worry about
	 * failures exposing transient data.
	 */
	written = actor(inode, pos, length, data, &iomap);

	/*
	 * Now the data has been copied, commit the range we've copied. This
	 * should not fail unless the filesystem has had a fatal error.
	 */
	if (ops->iomap_end) {
		ret = ops->iomap_end(inode, pos, length,
				     written > 0 ? written : 0,
				     flags, &iomap);
	}

	return written ? written : ret;
}
static sector_t
iomap_sector(struct iomap *iomap, loff_t pos)
{
	return (iomap->addr + pos - iomap->offset) >> SECTOR_SHIFT;
}
static struct iomap_page *
iomap_page_create(struct inode *inode, struct page *page)
{
	struct iomap_page *iop = to_iomap_page(page);

	if (iop || i_blocksize(inode) == PAGE_SIZE)
		return iop;

	iop = kmalloc(sizeof(*iop), GFP_NOFS | __GFP_NOFAIL);
	atomic_set(&iop->read_count, 0);
	atomic_set(&iop->write_count, 0);
	bitmap_zero(iop->uptodate, PAGE_SIZE / SECTOR_SIZE);

	/*
	 * migrate_page_move_mapping() assumes that pages with private data have
	 * their count elevated by 1.
	 */
	get_page(page);
	set_page_private(page, (unsigned long)iop);
	SetPagePrivate(page);
	return iop;
}
static void
iomap_page_release(struct page *page)
{
	struct iomap_page *iop = to_iomap_page(page);

	if (!iop)
		return;
	WARN_ON_ONCE(atomic_read(&iop->read_count));
	WARN_ON_ONCE(atomic_read(&iop->write_count));
	ClearPagePrivate(page);
	set_page_private(page, 0);
	put_page(page);
	kfree(iop);
}
/*
 * Calculate the range inside the page that we actually need to read.
 */
static void
iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop,
		loff_t *pos, loff_t length, unsigned *offp, unsigned *lenp)
{
	loff_t orig_pos = *pos;
	loff_t isize = i_size_read(inode);
	unsigned block_bits = inode->i_blkbits;
	unsigned block_size = (1 << block_bits);
	unsigned poff = offset_in_page(*pos);
	unsigned plen = min_t(loff_t, PAGE_SIZE - poff, length);
	unsigned first = poff >> block_bits;
	unsigned last = (poff + plen - 1) >> block_bits;

	/*
	 * If the block size is smaller than the page size we need to check the
	 * per-block uptodate status and adjust the offset and length if needed
	 * to avoid reading in already uptodate ranges.
	 */
	if (iop) {
		unsigned int i;

		/* move forward for each leading block marked uptodate */
		for (i = first; i <= last; i++) {
			if (!test_bit(i, iop->uptodate))
				break;
			*pos += block_size;
			poff += block_size;
			plen -= block_size;
			first++;
		}

		/* truncate len if we find any trailing uptodate block(s) */
		for ( ; i <= last; i++) {
			if (test_bit(i, iop->uptodate)) {
				plen -= (last - i + 1) * block_size;
				last = i - 1;
				break;
			}
		}
	}

	/*
	 * If the extent spans the block that contains the i_size we need to
	 * handle both halves separately so that we properly zero data in the
	 * page cache for blocks that are entirely outside of i_size.
	 */
	if (orig_pos <= isize && orig_pos + length > isize) {
		unsigned end = offset_in_page(isize - 1) >> block_bits;

		if (first <= end && last > end)
			plen -= (last - end) * block_size;
	}

	*offp = poff;
	*lenp = plen;
}
static void
iomap_set_range_uptodate(struct page *page, unsigned off, unsigned len)
{
	struct iomap_page *iop = to_iomap_page(page);
	struct inode *inode = page->mapping->host;
	unsigned first = off >> inode->i_blkbits;
	unsigned last = (off + len - 1) >> inode->i_blkbits;
	unsigned int i;
	bool uptodate = true;

	if (iop) {
		for (i = 0; i < PAGE_SIZE / i_blocksize(inode); i++) {
			if (i >= first && i <= last)
				set_bit(i, iop->uptodate);
			else if (!test_bit(i, iop->uptodate))
				uptodate = false;
		}
	}

	if (uptodate && !PageError(page))
		SetPageUptodate(page);
}
static void
iomap_read_finish(struct iomap_page *iop, struct page *page)
{
	if (!iop || atomic_dec_and_test(&iop->read_count))
		unlock_page(page);
}
static void
iomap_read_page_end_io(struct bio_vec *bvec, int error)
{
	struct page *page = bvec->bv_page;
	struct iomap_page *iop = to_iomap_page(page);

	if (unlikely(error)) {
		ClearPageUptodate(page);
		SetPageError(page);
	} else {
		iomap_set_range_uptodate(page, bvec->bv_offset, bvec->bv_len);
	}

	iomap_read_finish(iop, page);
}
static void
iomap_read_end_io(struct bio *bio)
{
	int error = blk_status_to_errno(bio->bi_status);
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all)
		iomap_read_page_end_io(bvec, error);
	bio_put(bio);
}
struct iomap_readpage_ctx {
	struct page		*cur_page;
	bool			cur_page_in_bio;
	bool			is_readahead;
	struct bio		*bio;
	struct list_head	*pages;
};
static void
iomap_read_inline_data(struct inode *inode, struct page *page,
		struct iomap *iomap)
{
	size_t size = i_size_read(inode);
	void *addr;

	if (PageUptodate(page))
		return;

	BUG_ON(size > PAGE_SIZE - offset_in_page(iomap->inline_data));

	addr = kmap_atomic(page);
	memcpy(addr, iomap->inline_data, size);
	memset(addr + size, 0, PAGE_SIZE - size);
	kunmap_atomic(addr);
	SetPageUptodate(page);
}
static loff_t
iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct iomap_readpage_ctx *ctx = data;
	struct page *page = ctx->cur_page;
	struct iomap_page *iop = iomap_page_create(inode, page);
	bool same_page = false, is_contig = false;
	loff_t orig_pos = pos;
	unsigned poff, plen;
	sector_t sector;

	if (iomap->type == IOMAP_INLINE) {
		WARN_ON_ONCE(pos);
		iomap_read_inline_data(inode, page, iomap);
		plen = PAGE_SIZE - poff;
		goto done;
	}

	/* zero post-eof blocks as the page may be mapped */
	iomap_adjust_read_range(inode, iop, &pos, length, &poff, &plen);

	if (iomap->type != IOMAP_MAPPED || pos >= i_size_read(inode)) {
		zero_user(page, poff, plen);
		iomap_set_range_uptodate(page, poff, plen);
		goto done;
	}

	ctx->cur_page_in_bio = true;

	/*
	 * Try to merge into a previous segment if we can.
	 */
	sector = iomap_sector(iomap, pos);
	if (ctx->bio && bio_end_sector(ctx->bio) == sector)
		is_contig = true;

	if (is_contig &&
	    __bio_try_merge_page(ctx->bio, page, plen, poff, &same_page)) {
		if (!same_page && iop)
			atomic_inc(&iop->read_count);
		goto done;
	}

	/*
	 * If we start a new segment we need to increase the read count, and we
	 * need to do so before submitting any previous full bio to make sure
	 * that we don't prematurely unlock the page.
	 */
	if (iop)
		atomic_inc(&iop->read_count);

	if (!ctx->bio || !is_contig || bio_full(ctx->bio, plen)) {
		gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
		int nr_vecs = (length + PAGE_SIZE - 1) >> PAGE_SHIFT;

		if (ctx->bio)
			submit_bio(ctx->bio);

		if (ctx->is_readahead) /* same as readahead_gfp_mask */
			gfp |= __GFP_NORETRY | __GFP_NOWARN;
		ctx->bio = bio_alloc(gfp, min(BIO_MAX_PAGES, nr_vecs));
		ctx->bio->bi_opf = REQ_OP_READ;
		if (ctx->is_readahead)
			ctx->bio->bi_opf |= REQ_RAHEAD;
		ctx->bio->bi_iter.bi_sector = sector;
		bio_set_dev(ctx->bio, iomap->bdev);
		ctx->bio->bi_end_io = iomap_read_end_io;
	}

	bio_add_page(ctx->bio, page, plen, poff);
done:
	/*
	 * Move the caller beyond our range so that it keeps making progress.
	 * For that we have to include any leading non-uptodate ranges, but
	 * we can skip trailing ones as they will be handled in the next
	 * iteration.
	 */
	return pos - orig_pos + plen;
}
int
iomap_readpage(struct page *page, const struct iomap_ops *ops)
{
	struct iomap_readpage_ctx ctx = { .cur_page = page };
	struct inode *inode = page->mapping->host;
	unsigned poff;
	loff_t ret;

	for (poff = 0; poff < PAGE_SIZE; poff += ret) {
		ret = iomap_apply(inode, page_offset(page) + poff,
				PAGE_SIZE - poff, 0, ops, &ctx,
				iomap_readpage_actor);
		if (ret <= 0) {
			WARN_ON_ONCE(ret == 0);
			SetPageError(page);
			break;
		}
	}

	if (ctx.bio) {
		submit_bio(ctx.bio);
		WARN_ON_ONCE(!ctx.cur_page_in_bio);
	} else {
		WARN_ON_ONCE(ctx.cur_page_in_bio);
		unlock_page(page);
	}

	/*
	 * Just like mpage_readpages and block_read_full_page we always
	 * return 0 and just mark the page as PageError on errors. This
	 * should be cleaned up all through the stack eventually.
	 */
	return 0;
}
EXPORT_SYMBOL_GPL(iomap_readpage);
static struct page *
iomap_next_page(struct inode *inode, struct list_head *pages, loff_t pos,
		loff_t length, loff_t *done)
{
	while (!list_empty(pages)) {
		struct page *page = lru_to_page(pages);

		if (page_offset(page) >= (u64)pos + length)
			break;

		list_del(&page->lru);
		if (!add_to_page_cache_lru(page, inode->i_mapping, page->index,
				GFP_NOFS))
			return page;

		/*
		 * If we already have a page in the page cache at index we are
		 * done. Upper layers don't care if it is uptodate after the
		 * readpages call itself as every page gets checked again once
		 * actually needed in the normal read path.
		 */
		*done += PAGE_SIZE;
		put_page(page);
	}

	return NULL;
}
static loff_t
iomap_readpages_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap)
{
	struct iomap_readpage_ctx *ctx = data;
	loff_t done, ret;

	for (done = 0; done < length; done += ret) {
		if (ctx->cur_page && offset_in_page(pos + done) == 0) {
			if (!ctx->cur_page_in_bio)
				unlock_page(ctx->cur_page);
			put_page(ctx->cur_page);
			ctx->cur_page = NULL;
		}
		if (!ctx->cur_page) {
			ctx->cur_page = iomap_next_page(inode, ctx->pages,
					pos, length, &done);
			if (!ctx->cur_page)
				break;
			ctx->cur_page_in_bio = false;
		}
		ret = iomap_readpage_actor(inode, pos + done, length - done,
				ctx, iomap);
	}

	return done;
}
int
iomap_readpages(struct address_space *mapping, struct list_head *pages,
		unsigned nr_pages, const struct iomap_ops *ops)
{
	struct iomap_readpage_ctx ctx = {
		.pages		= pages,
		.is_readahead	= true,
	};
	loff_t pos = page_offset(list_entry(pages->prev, struct page, lru));
	loff_t last = page_offset(list_entry(pages->next, struct page, lru));
	loff_t length = last - pos + PAGE_SIZE, ret = 0;

	while (length > 0) {
		ret = iomap_apply(mapping->host, pos, length, 0, ops,
				&ctx, iomap_readpages_actor);
		if (ret <= 0) {
			WARN_ON_ONCE(ret == 0);
			goto done;
		}
		pos += ret;
		length -= ret;
	}
	ret = 0;
done:
	if (ctx.bio)
		submit_bio(ctx.bio);
	if (ctx.cur_page) {
		if (!ctx.cur_page_in_bio)
			unlock_page(ctx.cur_page);
		put_page(ctx.cur_page);
	}

	/*
	 * Check that we didn't lose a page due to the arcane calling
	 * conventions.
	 */
	WARN_ON_ONCE(!ret && !list_empty(ctx.pages));
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_readpages);
/*
 * iomap_is_partially_uptodate checks whether blocks within a page are
 * uptodate or not.
 *
 * Returns true if all blocks which correspond to a file portion
 * we want to read within the page are uptodate.
 */
int
iomap_is_partially_uptodate(struct page *page, unsigned long from,
		unsigned long count)
{
	struct iomap_page *iop = to_iomap_page(page);
	struct inode *inode = page->mapping->host;
	unsigned len, first, last;
	unsigned i;

	/* Limit range to one page */
	len = min_t(unsigned, PAGE_SIZE - from, count);

	/* First and last blocks in range within page */
	first = from >> inode->i_blkbits;
	last = (from + len - 1) >> inode->i_blkbits;

	if (iop) {
		for (i = first; i <= last; i++)
			if (!test_bit(i, iop->uptodate))
				return 0;
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);
int
iomap_releasepage(struct page *page, gfp_t gfp_mask)
{
	/*
	 * mm accommodates an old ext3 case where clean pages might not have had
	 * the dirty bit cleared. Thus, it can send actual dirty pages to
	 * ->releasepage() via shrink_active_list(), skip those here.
	 */
	if (PageDirty(page) || PageWriteback(page))
		return 0;
	iomap_page_release(page);
	return 1;
}
EXPORT_SYMBOL_GPL(iomap_releasepage);
void
iomap_invalidatepage(struct page *page, unsigned int offset, unsigned int len)
{
	/*
	 * If we are invalidating the entire page, clear the dirty state from it
	 * and release it to avoid unnecessary buildup of the LRU.
	 */
	if (offset == 0 && len == PAGE_SIZE) {
		WARN_ON_ONCE(PageWriteback(page));
		cancel_dirty_page(page);
		iomap_page_release(page);
	}
}
EXPORT_SYMBOL_GPL(iomap_invalidatepage);
#ifdef CONFIG_MIGRATION
int
iomap_migrate_page(struct address_space *mapping, struct page *newpage,
		struct page *page, enum migrate_mode mode)
{
	int ret;

	ret = migrate_page_move_mapping(mapping, newpage, page, mode, 0);
	if (ret != MIGRATEPAGE_SUCCESS)
		return ret;

	if (page_has_private(page)) {
		ClearPagePrivate(page);
		get_page(newpage);
		set_page_private(newpage, page_private(page));
		set_page_private(page, 0);
		put_page(page);
		SetPagePrivate(newpage);
	}

	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);
	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL_GPL(iomap_migrate_page);
#endif /* CONFIG_MIGRATION */
static void
iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
{
	loff_t i_size = i_size_read(inode);

	/*
	 * Only truncate newly allocated pages beyond EOF, even if the
	 * write started inside the existing inode size.
	 */
	if (pos + len > i_size)
		truncate_pagecache_range(inode, max(pos, i_size), pos + len);
}
static int
iomap_read_page_sync(struct inode *inode, loff_t block_start, struct page *page,
		unsigned poff, unsigned plen, unsigned from, unsigned to,
		struct iomap *iomap)
{
	struct bio_vec bvec;
	struct bio bio;

	if (iomap->type != IOMAP_MAPPED || block_start >= i_size_read(inode)) {
		zero_user_segments(page, poff, from, to, poff + plen);
		iomap_set_range_uptodate(page, poff, plen);
		return 0;
	}

	bio_init(&bio, &bvec, 1);
	bio.bi_opf = REQ_OP_READ;
	bio.bi_iter.bi_sector = iomap_sector(iomap, block_start);
	bio_set_dev(&bio, iomap->bdev);
	__bio_add_page(&bio, page, plen, poff);
	return submit_bio_wait(&bio);
}
static int
__iomap_write_begin(struct inode *inode, loff_t pos, unsigned len,
		struct page *page, struct iomap *iomap)
{
	struct iomap_page *iop = iomap_page_create(inode, page);
	loff_t block_size = i_blocksize(inode);
	loff_t block_start = pos & ~(block_size - 1);
	loff_t block_end = (pos + len + block_size - 1) & ~(block_size - 1);
	unsigned from = offset_in_page(pos), to = from + len, poff, plen;
	int status = 0;

	if (PageUptodate(page))
		return 0;

	do {
		iomap_adjust_read_range(inode, iop, &block_start,
				block_end - block_start, &poff, &plen);
		if (plen == 0)
			break;

		if ((from > poff && from < poff + plen) ||
		    (to > poff && to < poff + plen)) {
			status = iomap_read_page_sync(inode, block_start, page,
					poff, plen, from, to, iomap);
			if (status)
				break;
		}

	} while ((block_start += plen) < block_end);

	return status;
}
static int
iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, struct iomap *iomap)
{
	const struct iomap_page_ops *page_ops = iomap->page_ops;
	pgoff_t index = pos >> PAGE_SHIFT;
	struct page *page;
	int status = 0;

	BUG_ON(pos + len > iomap->offset + iomap->length);

	if (fatal_signal_pending(current))
		return -EINTR;

	if (page_ops && page_ops->page_prepare) {
		status = page_ops->page_prepare(inode, pos, len, iomap);
		if (status)
			return status;
	}

	page = grab_cache_page_write_begin(inode->i_mapping, index, flags);
	if (!page) {
		status = -ENOMEM;
		goto out_no_page;
	}

	if (iomap->type == IOMAP_INLINE)
		iomap_read_inline_data(inode, page, iomap);
	else if (iomap->flags & IOMAP_F_BUFFER_HEAD)
		status = __block_write_begin_int(page, pos, len, NULL, iomap);
	else
		status = __iomap_write_begin(inode, pos, len, page, iomap);

	if (unlikely(status))
		goto out_unlock;

	*pagep = page;
	return 0;

out_unlock:
	unlock_page(page);
	put_page(page);
	iomap_write_failed(inode, pos, len);

out_no_page:
	if (page_ops && page_ops->page_done)
		page_ops->page_done(inode, pos, 0, NULL, iomap);
	return status;
}
int
iomap_set_page_dirty(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	int newly_dirty;

	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	/*
	 * Lock out page->mem_cgroup migration to keep PageDirty
	 * synchronized with per-memcg dirty page counters.
	 */
	lock_page_memcg(page);
	newly_dirty = !TestSetPageDirty(page);
	if (newly_dirty)
		__set_page_dirty(page, mapping, 0);
	unlock_page_memcg(page);

	if (newly_dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	return newly_dirty;
}
EXPORT_SYMBOL_GPL(iomap_set_page_dirty);
static int
__iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
		unsigned copied, struct page *page, struct iomap *iomap)
{
	flush_dcache_page(page);

	/*
	 * The blocks that were entirely written will now be uptodate, so we
	 * don't have to worry about a readpage reading them and overwriting a
	 * partial write. However if we have encountered a short write and only
	 * partially written into a block, it will not be marked uptodate, so a
	 * readpage might come in and destroy our partial write.
	 *
	 * Do the simplest thing, and just treat any short write to a non
	 * uptodate page as a zero-length write, and force the caller to redo
	 * the whole thing.
	 */
	if (unlikely(copied < len && !PageUptodate(page)))
		return 0;
	iomap_set_range_uptodate(page, offset_in_page(pos), len);
	iomap_set_page_dirty(page);
	return copied;
}
static int
iomap_write_end_inline(struct inode *inode, struct page *page,
		struct iomap *iomap, loff_t pos, unsigned copied)
{
	void *addr;

	WARN_ON_ONCE(!PageUptodate(page));
	BUG_ON(pos + copied > PAGE_SIZE - offset_in_page(iomap->inline_data));

	addr = kmap_atomic(page);
	memcpy(iomap->inline_data + pos, addr + pos, copied);
	kunmap_atomic(addr);

	mark_inode_dirty(inode);
	return copied;
}
static int
iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
		unsigned copied, struct page *page, struct iomap *iomap)
{
	const struct iomap_page_ops *page_ops = iomap->page_ops;
	loff_t old_size = inode->i_size;
	int ret;

	if (iomap->type == IOMAP_INLINE) {
		ret = iomap_write_end_inline(inode, page, iomap, pos, copied);
	} else if (iomap->flags & IOMAP_F_BUFFER_HEAD) {
		ret = block_write_end(NULL, inode->i_mapping, pos, len, copied,
				page, NULL);
	} else {
		ret = __iomap_write_end(inode, pos, len, copied, page, iomap);
	}

	/*
	 * Update the in-memory inode size after copying the data into the page
	 * cache. It's up to the file system to write the updated size to disk,
	 * preferably after I/O completion so that no stale data is exposed.
	 */
	if (pos + ret > old_size) {
		i_size_write(inode, pos + ret);
		iomap->flags |= IOMAP_F_SIZE_CHANGED;
	}
	unlock_page(page);

	if (old_size < pos)
		pagecache_isize_extended(inode, old_size, pos);
	if (page_ops && page_ops->page_done)
		page_ops->page_done(inode, pos, ret, page, iomap);
	put_page(page);

	if (ret < len)
		iomap_write_failed(inode, pos, len);
	return ret;
}
static loff_t
iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct iov_iter *i = data;
	long status = 0;
	ssize_t written = 0;
	unsigned int flags = AOP_FLAG_NOFS;

	do {
		struct page *page;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */
		size_t copied;		/* Bytes copied from user */

		offset = offset_in_page(pos);
		bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_count(i));
again:
		if (bytes > length)
			bytes = length;

		/*
		 * Bring in the user page that we will copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 *
		 * Not only is this an optimisation, but it is also required
		 * to check that the address is actually valid, when atomic
		 * usercopies are used, below.
		 */
		if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
			status = -EFAULT;
			break;
		}

		status = iomap_write_begin(inode, pos, bytes, flags, &page,
				iomap);
		if (unlikely(status))
			break;

		if (mapping_writably_mapped(inode->i_mapping))
			flush_dcache_page(page);

		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);

		flush_dcache_page(page);

		status = iomap_write_end(inode, pos, bytes, copied, page,
				iomap);
		if (unlikely(status < 0))
			break;
		copied = status;

		iov_iter_advance(i, copied);
		if (unlikely(copied == 0)) {
			/*
			 * If we were unable to copy any data at all, we must
			 * fall back to a single segment length write.
			 *
			 * If we didn't fallback here, we could livelock
			 * because not all segments in the iov can be copied at
			 * once without a pagefault.
			 */
			bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_single_seg_count(i));
			goto again;
		}
		pos += copied;
		written += copied;
		length -= copied;

		balance_dirty_pages_ratelimited(inode->i_mapping);
	} while (iov_iter_count(i) && length);

	return written ? written : status;
}
ssize_t
iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	loff_t pos = iocb->ki_pos, ret = 0, written = 0;

	while (iov_iter_count(iter)) {
		ret = iomap_apply(inode, pos, iov_iter_count(iter),
				IOMAP_WRITE, ops, iter, iomap_write_actor);
		if (ret <= 0)
			break;
		pos += ret;
		written += ret;
	}

	return written ? written : ret;
}
EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
static struct page *
__iomap_read_page(struct inode *inode, loff_t offset)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	page = read_mapping_page(mapping, offset >> PAGE_SHIFT, NULL);
	if (IS_ERR(page))
		return page;
	if (!PageUptodate(page)) {
		put_page(page);
		return ERR_PTR(-EIO);
	}
	return page;
}
static loff_t
iomap_dirty_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	long status = 0;
	ssize_t written = 0;

	do {
		struct page *page, *rpage;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */

		offset = offset_in_page(pos);
		bytes = min_t(loff_t, PAGE_SIZE - offset, length);

		rpage = __iomap_read_page(inode, pos);
		if (IS_ERR(rpage))
			return PTR_ERR(rpage);

		status = iomap_write_begin(inode, pos, bytes,
					   AOP_FLAG_NOFS, &page, iomap);
		put_page(rpage);
		if (unlikely(status))
			return status;

		WARN_ON_ONCE(!PageUptodate(page));

		status = iomap_write_end(inode, pos, bytes, bytes, page, iomap);
		if (unlikely(status <= 0)) {
			if (WARN_ON_ONCE(status == 0))
				return -EIO;
			return status;
		}

		pos += status;
		written += status;
		length -= status;

		balance_dirty_pages_ratelimited(inode->i_mapping);
	} while (length);

	return written;
}
int
iomap_file_dirty(struct inode *inode, loff_t pos, loff_t len,
		const struct iomap_ops *ops)
{
	loff_t ret;

	while (len) {
		ret = iomap_apply(inode, pos, len, IOMAP_WRITE, ops, NULL,
				iomap_dirty_actor);
		if (ret <= 0)
			return ret;
		pos += ret;
		len -= ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_file_dirty);
static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset,
		unsigned bytes, struct iomap *iomap)
{
	struct page *page;
	int status;

	status = iomap_write_begin(inode, pos, bytes, AOP_FLAG_NOFS, &page,
				   iomap);
	if (status)
		return status;

	zero_user(page, offset, bytes);
	mark_page_accessed(page);

	return iomap_write_end(inode, pos, bytes, bytes, page, iomap);
}
static int iomap_dax_zero(loff_t pos, unsigned offset, unsigned bytes,
		struct iomap *iomap)
{
	return __dax_zero_page_range(iomap->bdev, iomap->dax_dev,
			iomap_sector(iomap, pos & PAGE_MASK), offset, bytes);
}
static loff_t
iomap_zero_range_actor(struct inode *inode, loff_t pos, loff_t count,
		void *data, struct iomap *iomap)
{
	bool *did_zero = data;
	loff_t written = 0;
	int status;

	/* already zeroed? we're done. */
	if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
		return count;

	do {
		unsigned offset, bytes;

		offset = offset_in_page(pos);
		bytes = min_t(loff_t, PAGE_SIZE - offset, count);

		if (IS_DAX(inode))
			status = iomap_dax_zero(pos, offset, bytes, iomap);
		else
			status = iomap_zero(inode, pos, offset, bytes, iomap);
		if (status < 0)
			return status;

		pos += bytes;
		count -= bytes;
		written += bytes;
		if (did_zero)
			*did_zero = true;
	} while (count > 0);

	return written;
}
int
iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
		const struct iomap_ops *ops)
{
	loff_t ret;

	while (len > 0) {
		ret = iomap_apply(inode, pos, len, IOMAP_ZERO,
				ops, did_zero, iomap_zero_range_actor);
		if (ret <= 0)
			return ret;

		pos += ret;
		len -= ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_zero_range);
int
iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
		const struct iomap_ops *ops)
{
	unsigned int blocksize = i_blocksize(inode);
	unsigned int off = pos & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!off)
		return 0;
	return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
}
EXPORT_SYMBOL_GPL(iomap_truncate_page);
static loff_t
iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap)
{
	struct page *page = data;
	int ret;

	if (iomap->flags & IOMAP_F_BUFFER_HEAD) {
		ret = __block_write_begin_int(page, pos, length, NULL, iomap);
		if (ret)
			return ret;
		block_commit_write(page, 0, length);
	} else {
		WARN_ON_ONCE(!PageUptodate(page));
		iomap_page_create(inode, page);
		set_page_dirty(page);
	}

	return length;
}
vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	unsigned long length;
	loff_t offset, size;
	ssize_t ret;

	lock_page(page);
	size = i_size_read(inode);
	if ((page->mapping != inode->i_mapping) ||
	    (page_offset(page) > size)) {
		/* We overload EFAULT to mean page got truncated */
		ret = -EFAULT;
		goto out_unlock;
	}

	/* page is wholly or partially inside EOF */
	if (((page->index + 1) << PAGE_SHIFT) > size)
		length = offset_in_page(size);
	else
		length = PAGE_SIZE;

	offset = page_offset(page);
	while (length > 0) {
		ret = iomap_apply(inode, offset, length,
				IOMAP_WRITE | IOMAP_FAULT, ops, page,
				iomap_page_mkwrite_actor);
		if (unlikely(ret <= 0))
			goto out_unlock;
		offset += ret;
		length -= ret;
	}

	wait_for_stable_page(page);
	return VM_FAULT_LOCKED;
out_unlock:
	unlock_page(page);
	return block_page_mkwrite_return(ret);
}
EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
struct fiemap_ctx {
	struct fiemap_extent_info *fi;
	struct iomap prev;
};
static int iomap_to_fiemap(struct fiemap_extent_info *fi,
		struct iomap *iomap, u32 flags)
{
	switch (iomap->type) {
	case IOMAP_HOLE:
		/* skip holes */
		return 0;
	case IOMAP_DELALLOC:
		flags |= FIEMAP_EXTENT_DELALLOC | FIEMAP_EXTENT_UNKNOWN;
		break;
	case IOMAP_MAPPED:
		break;
	case IOMAP_UNWRITTEN:
		flags |= FIEMAP_EXTENT_UNWRITTEN;
		break;
	case IOMAP_INLINE:
		flags |= FIEMAP_EXTENT_DATA_INLINE;
		break;
	}

	if (iomap->flags & IOMAP_F_MERGED)
		flags |= FIEMAP_EXTENT_MERGED;
	if (iomap->flags & IOMAP_F_SHARED)
		flags |= FIEMAP_EXTENT_SHARED;

	return fiemap_fill_next_extent(fi, iomap->offset,
			iomap->addr != IOMAP_NULL_ADDR ? iomap->addr : 0,
			iomap->length, flags);
}
static loff_t
iomap_fiemap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct fiemap_ctx *ctx = data;
	loff_t ret = length;

	if (iomap->type == IOMAP_HOLE)
		return length;

	ret = iomap_to_fiemap(ctx->fi, &ctx->prev, 0);
	ctx->prev = *iomap;
	switch (ret) {
	case 0:		/* success */
		return length;
	case 1:		/* extent array full */
		return 0;
	default:
		return ret;
	}
}
*inode
, struct fiemap_extent_info
*fi
,
1210 loff_t start
, loff_t len
, const struct iomap_ops
*ops
)
1212 struct fiemap_ctx ctx
;
1215 memset(&ctx
, 0, sizeof(ctx
));
1217 ctx
.prev
.type
= IOMAP_HOLE
;
1219 ret
= fiemap_check_flags(fi
, FIEMAP_FLAG_SYNC
);
1223 if (fi
->fi_flags
& FIEMAP_FLAG_SYNC
) {
1224 ret
= filemap_write_and_wait(inode
->i_mapping
);
1230 ret
= iomap_apply(inode
, start
, len
, IOMAP_REPORT
, ops
, &ctx
,
1231 iomap_fiemap_actor
);
1232 /* inode with no (attribute) mapping will give ENOENT */
1244 if (ctx
.prev
.type
!= IOMAP_HOLE
) {
1245 ret
= iomap_to_fiemap(fi
, &ctx
.prev
, FIEMAP_EXTENT_LAST
);
1252 EXPORT_SYMBOL_GPL(iomap_fiemap
);
/*
 * Seek for SEEK_DATA / SEEK_HOLE within @page, starting at @lastoff.
 * Returns true if found and updates @lastoff to the offset in file.
 */
static bool
page_seek_hole_data(struct inode *inode, struct page *page, loff_t *lastoff,
		int whence)
{
	const struct address_space_operations *ops = inode->i_mapping->a_ops;
	unsigned int bsize = i_blocksize(inode), off;
	bool seek_data = whence == SEEK_DATA;
	loff_t poff = page_offset(page);

	if (WARN_ON_ONCE(*lastoff >= poff + PAGE_SIZE))
		return false;

	if (*lastoff < poff) {
		/*
		 * Last offset smaller than the start of the page means we found
		 * a hole:
		 */
		if (whence == SEEK_HOLE)
			return true;
		*lastoff = poff;
	}

	/*
	 * Just check the page unless we can and should check block ranges:
	 */
	if (bsize == PAGE_SIZE || !ops->is_partially_uptodate)
		return PageUptodate(page) == seek_data;

	lock_page(page);
	if (unlikely(page->mapping != inode->i_mapping))
		goto out_unlock_not_found;

	for (off = 0; off < PAGE_SIZE; off += bsize) {
		if (offset_in_page(*lastoff) >= off + bsize)
			continue;
		if (ops->is_partially_uptodate(page, off, bsize) == seek_data) {
			unlock_page(page);
			return true;
		}
		*lastoff = poff + off + bsize;
	}

out_unlock_not_found:
	unlock_page(page);
	return false;
}
/*
 * Seek for SEEK_DATA / SEEK_HOLE in the page cache.
 *
 * Within unwritten extents, the page cache determines which parts are holes
 * and which are data: uptodate buffer heads count as data; everything else
 * counts as a hole.
 *
 * Returns the resulting offset on success, and -ENOENT otherwise.
 */
static loff_t
page_cache_seek_hole_data(struct inode *inode, loff_t offset, loff_t length,
		int whence)
{
	pgoff_t index = offset >> PAGE_SHIFT;
	pgoff_t end = DIV_ROUND_UP(offset + length, PAGE_SIZE);
	loff_t lastoff = offset;
	struct pagevec pvec;

	if (length <= 0)
		return -ENOENT;

	pagevec_init(&pvec);

	do {
		unsigned nr_pages, i;

		nr_pages = pagevec_lookup_range(&pvec, inode->i_mapping, &index,
						end - 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			if (page_seek_hole_data(inode, page, &lastoff, whence))
				goto check_range;
			lastoff = page_offset(page) + PAGE_SIZE;
		}
		pagevec_release(&pvec);
	} while (index < end);

	/* When no page at lastoff and we are not done, we found a hole. */
	if (whence != SEEK_HOLE)
		goto not_found;

check_range:
	if (lastoff < offset + length)
		goto out;
not_found:
	lastoff = -ENOENT;
out:
	pagevec_release(&pvec);
	return lastoff;
}
static loff_t
iomap_seek_hole_actor(struct inode *inode, loff_t offset, loff_t length,
		      void *data, struct iomap *iomap)
{
	switch (iomap->type) {
	case IOMAP_UNWRITTEN:
		offset = page_cache_seek_hole_data(inode, offset, length,
						   SEEK_HOLE);
		if (offset < 0)
			return length;
		/* fall through */
	case IOMAP_HOLE:
		*(loff_t *)data = offset;
		return 0;
	default:
		return length;
	}
}
loff_t
iomap_seek_hole(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
{
	loff_t size = i_size_read(inode);
	loff_t length = size - offset;
	loff_t ret;

	/* Nothing to be found before or beyond the end of the file. */
	if (offset < 0 || offset >= size)
		return -ENXIO;

	while (length > 0) {
		ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
				  &offset, iomap_seek_hole_actor);
		if (ret < 0)
			return ret;
		if (ret == 0)
			break;

		offset += ret;
		length -= ret;
	}

	return offset;
}
EXPORT_SYMBOL_GPL(iomap_seek_hole);
static loff_t
iomap_seek_data_actor(struct inode *inode, loff_t offset, loff_t length,
		      void *data, struct iomap *iomap)
{
	switch (iomap->type) {
	case IOMAP_HOLE:
		return length;
	case IOMAP_UNWRITTEN:
		offset = page_cache_seek_hole_data(inode, offset, length,
						   SEEK_DATA);
		if (offset < 0)
			return length;
		/* fall through */
	default:
		*(loff_t *)data = offset;
		return 0;
	}
}
loff_t
iomap_seek_data(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
{
	loff_t size = i_size_read(inode);
	loff_t length = size - offset;
	loff_t ret;

	/* Nothing to be found before or beyond the end of the file. */
	if (offset < 0 || offset >= size)
		return -ENXIO;

	while (length > 0) {
		ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
				  &offset, iomap_seek_data_actor);
		if (ret < 0)
			return ret;
		if (ret == 0)
			break;

		offset += ret;
		length -= ret;
	}

	if (length <= 0)
		return -ENXIO;
	return offset;
}
EXPORT_SYMBOL_GPL(iomap_seek_data);
/*
 * Private flags for iomap_dio, must not overlap with the public ones in
 * iomap.h:
 */
#define IOMAP_DIO_WRITE_FUA	(1 << 28)
#define IOMAP_DIO_NEED_SYNC	(1 << 29)
#define IOMAP_DIO_WRITE		(1 << 30)
#define IOMAP_DIO_DIRTY	(1 << 31)
struct iomap_dio {
	struct kiocb		*iocb;
	iomap_dio_end_io_t	*end_io;
	loff_t			i_size;
	loff_t			size;
	atomic_t		ref;
	unsigned		flags;
	int			error;
	bool			wait_for_completion;

	union {
		/* used during submission and for synchronous completion: */
		struct {
			struct iov_iter		*iter;
			struct task_struct	*waiter;
			struct request_queue	*last_queue;
			blk_qc_t		cookie;
		} submit;

		/* used for aio completion: */
		struct {
			struct work_struct	work;
		} aio;
	};
};
int iomap_dio_iopoll(struct kiocb *kiocb, bool spin)
{
	struct request_queue *q = READ_ONCE(kiocb->private);

	if (!q)
		return 0;
	return blk_poll(q, READ_ONCE(kiocb->ki_cookie), spin);
}
EXPORT_SYMBOL_GPL(iomap_dio_iopoll);
static void iomap_dio_submit_bio(struct iomap_dio *dio, struct iomap *iomap,
		struct bio *bio)
{
	atomic_inc(&dio->ref);

	if (dio->iocb->ki_flags & IOCB_HIPRI)
		bio_set_polled(bio, dio->iocb);

	dio->submit.last_queue = bdev_get_queue(iomap->bdev);
	dio->submit.cookie = submit_bio(bio);
}
static ssize_t iomap_dio_complete(struct iomap_dio *dio)
{
	struct kiocb *iocb = dio->iocb;
	struct inode *inode = file_inode(iocb->ki_filp);
	loff_t offset = iocb->ki_pos;
	ssize_t ret;

	if (dio->end_io) {
		ret = dio->end_io(iocb,
				dio->error ? dio->error : dio->size,
				dio->flags);
	} else {
		ret = dio->error;
	}

	if (likely(!ret)) {
		ret = dio->size;
		/* check for short read */
		if (offset + ret > dio->i_size &&
		    !(dio->flags & IOMAP_DIO_WRITE))
			ret = dio->i_size - offset;
		iocb->ki_pos += ret;
	}

	/*
	 * Try again to invalidate clean pages which might have been cached by
	 * non-direct readahead, or faulted in by get_user_pages() if the source
	 * of the write was an mmap'ed region of the file we're writing. Either
	 * one is a pretty crazy thing to do, so we don't support it 100%. If
	 * this invalidation fails, tough, the write still worked...
	 *
	 * And this page cache invalidation has to be after dio->end_io(), as
	 * some filesystems convert unwritten extents to real allocations in
	 * end_io() when necessary, otherwise a racing buffer read would cache
	 * zeros from unwritten extents.
	 */
	if (!dio->error &&
	    (dio->flags & IOMAP_DIO_WRITE) && inode->i_mapping->nrpages) {
		int err;
		err = invalidate_inode_pages2_range(inode->i_mapping,
				offset >> PAGE_SHIFT,
				(offset + dio->size - 1) >> PAGE_SHIFT);
		if (err)
			dio_warn_stale_pagecache(iocb->ki_filp);
	}

	/*
	 * If this is a DSYNC write, make sure we push it to stable storage now
	 * that we've written data.
	 */
	if (ret > 0 && (dio->flags & IOMAP_DIO_NEED_SYNC))
		ret = generic_write_sync(iocb, ret);

	inode_dio_end(file_inode(iocb->ki_filp));
	kfree(dio);

	return ret;
}
static void iomap_dio_complete_work(struct work_struct *work)
{
	struct iomap_dio *dio = container_of(work, struct iomap_dio, aio.work);
	struct kiocb *iocb = dio->iocb;

	iocb->ki_complete(iocb, iomap_dio_complete(dio), 0);
}
/*
 * Set an error in the dio if none is set yet. We have to use cmpxchg
 * as the submission context and the completion context(s) can race to
 * update the error.
 */
static inline void iomap_dio_set_error(struct iomap_dio *dio, int ret)
{
	cmpxchg(&dio->error, 0, ret);
}
static void iomap_dio_bio_end_io(struct bio *bio)
{
	struct iomap_dio *dio = bio->bi_private;
	bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY);

	if (bio->bi_status)
		iomap_dio_set_error(dio, blk_status_to_errno(bio->bi_status));

	if (atomic_dec_and_test(&dio->ref)) {
		if (dio->wait_for_completion) {
			struct task_struct *waiter = dio->submit.waiter;
			WRITE_ONCE(dio->submit.waiter, NULL);
			blk_wake_io_task(waiter);
		} else if (dio->flags & IOMAP_DIO_WRITE) {
			struct inode *inode = file_inode(dio->iocb->ki_filp);

			INIT_WORK(&dio->aio.work, iomap_dio_complete_work);
			queue_work(inode->i_sb->s_dio_done_wq, &dio->aio.work);
		} else {
			iomap_dio_complete_work(&dio->aio.work);
		}
	}

	if (should_dirty) {
		bio_check_pages_dirty(bio);
	} else {
		bio_release_pages(bio, false);
		bio_put(bio);
	}
}
static void
iomap_dio_zero(struct iomap_dio *dio, struct iomap *iomap, loff_t pos,
		unsigned len)
{
	struct page *page = ZERO_PAGE(0);
	int flags = REQ_SYNC | REQ_IDLE;
	struct bio *bio;

	bio = bio_alloc(GFP_KERNEL, 1);
	bio_set_dev(bio, iomap->bdev);
	bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
	bio->bi_private = dio;
	bio->bi_end_io = iomap_dio_bio_end_io;

	get_page(page);
	__bio_add_page(bio, page, len, 0);
	bio_set_op_attrs(bio, REQ_OP_WRITE, flags);
	iomap_dio_submit_bio(dio, iomap, bio);
}
static loff_t
iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
		struct iomap_dio *dio, struct iomap *iomap)
{
	unsigned int blkbits = blksize_bits(bdev_logical_block_size(iomap->bdev));
	unsigned int fs_block_size = i_blocksize(inode), pad;
	unsigned int align = iov_iter_alignment(dio->submit.iter);
	struct iov_iter iter;
	struct bio *bio;
	bool need_zeroout = false;
	bool use_fua = false;
	int nr_pages, ret = 0;
	size_t copied = 0;

	if ((pos | length | align) & ((1 << blkbits) - 1))
		return -EINVAL;

	if (iomap->type == IOMAP_UNWRITTEN) {
		dio->flags |= IOMAP_DIO_UNWRITTEN;
		need_zeroout = true;
	}

	if (iomap->flags & IOMAP_F_SHARED)
		dio->flags |= IOMAP_DIO_COW;

	if (iomap->flags & IOMAP_F_NEW) {
		need_zeroout = true;
	} else if (iomap->type == IOMAP_MAPPED) {
		/*
		 * Use a FUA write if we need datasync semantics, this is a pure
		 * data IO that doesn't require any metadata updates (including
		 * after IO completion such as unwritten extent conversion) and
		 * the underlying device supports FUA. This allows us to avoid
		 * cache flushes on IO completion.
		 */
		if (!(iomap->flags & (IOMAP_F_SHARED|IOMAP_F_DIRTY)) &&
		    (dio->flags & IOMAP_DIO_WRITE_FUA) &&
		    blk_queue_fua(bdev_get_queue(iomap->bdev)))
			use_fua = true;
	}

	/*
	 * Operate on a partial iter trimmed to the extent we were called for.
	 * We'll update the iter in the dio once we're done with this extent.
	 */
	iter = *dio->submit.iter;
	iov_iter_truncate(&iter, length);

	nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);
	if (nr_pages <= 0)
		return nr_pages;

	if (need_zeroout) {
		/* zero out from the start of the block to the write offset */
		pad = pos & (fs_block_size - 1);
		if (pad)
			iomap_dio_zero(dio, iomap, pos - pad, pad);
	}

	do {
		size_t n;
		if (dio->error) {
			iov_iter_revert(dio->submit.iter, copied);
			return 0;
		}

		bio = bio_alloc(GFP_KERNEL, nr_pages);
		bio_set_dev(bio, iomap->bdev);
		bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
		bio->bi_write_hint = dio->iocb->ki_hint;
		bio->bi_ioprio = dio->iocb->ki_ioprio;
		bio->bi_private = dio;
		bio->bi_end_io = iomap_dio_bio_end_io;

		ret = bio_iov_iter_get_pages(bio, &iter);
		if (unlikely(ret)) {
			/*
			 * We have to stop part way through an IO. We must fall
			 * through to the sub-block tail zeroing here, otherwise
			 * this short IO may expose stale data in the tail of
			 * the block we haven't written data to.
			 */
			bio_put(bio);
			goto zero_tail;
		}

		n = bio->bi_iter.bi_size;
		if (dio->flags & IOMAP_DIO_WRITE) {
			bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
			if (use_fua)
				bio->bi_opf |= REQ_FUA;
			else
				dio->flags &= ~IOMAP_DIO_WRITE_FUA;
			task_io_account_write(n);
		} else {
			bio->bi_opf = REQ_OP_READ;
			if (dio->flags & IOMAP_DIO_DIRTY)
				bio_set_pages_dirty(bio);
		}

		iov_iter_advance(dio->submit.iter, n);

		dio->size += n;
		pos += n;
		copied += n;

		nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);
		iomap_dio_submit_bio(dio, iomap, bio);
	} while (nr_pages);

	/*
	 * We need to zeroout the tail of a sub-block write if the extent type
	 * requires zeroing or the write extends beyond EOF. If we don't zero
	 * the block tail in the latter case, we can expose stale data via mmap
	 * reads of the EOF block.
	 */
zero_tail:
	if (need_zeroout ||
	    ((dio->flags & IOMAP_DIO_WRITE) && pos >= i_size_read(inode))) {
		/* zero out from the end of the write to the end of the block */
		pad = pos & (fs_block_size - 1);
		if (pad)
			iomap_dio_zero(dio, iomap, pos, fs_block_size - pad);
	}
	return copied ? copied : ret;
}
static loff_t
iomap_dio_hole_actor(loff_t length, struct iomap_dio *dio)
{
	length = iov_iter_zero(length, dio->submit.iter);
	dio->size += length;
	return length;
}
static loff_t
iomap_dio_inline_actor(struct inode *inode, loff_t pos, loff_t length,
		struct iomap_dio *dio, struct iomap *iomap)
{
	struct iov_iter *iter = dio->submit.iter;
	size_t copied;

	BUG_ON(pos + length > PAGE_SIZE - offset_in_page(iomap->inline_data));

	if (dio->flags & IOMAP_DIO_WRITE) {
		loff_t size = inode->i_size;

		if (pos > size)
			memset(iomap->inline_data + size, 0, pos - size);
		copied = copy_from_iter(iomap->inline_data + pos, length, iter);
		if (copied) {
			if (pos + copied > size)
				i_size_write(inode, pos + copied);
			mark_inode_dirty(inode);
		}
	} else {
		copied = copy_to_iter(iomap->inline_data + pos, length, iter);
	}
	dio->size += copied;
	return copied;
}
static loff_t
iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap)
{
	struct iomap_dio *dio = data;

	switch (iomap->type) {
	case IOMAP_HOLE:
		if (WARN_ON_ONCE(dio->flags & IOMAP_DIO_WRITE))
			return -EIO;
		return iomap_dio_hole_actor(length, dio);
	case IOMAP_UNWRITTEN:
		if (!(dio->flags & IOMAP_DIO_WRITE))
			return iomap_dio_hole_actor(length, dio);
		return iomap_dio_bio_actor(inode, pos, length, dio, iomap);
	case IOMAP_MAPPED:
		return iomap_dio_bio_actor(inode, pos, length, dio, iomap);
	case IOMAP_INLINE:
		return iomap_dio_inline_actor(inode, pos, length, dio, iomap);
	default:
		WARN_ON_ONCE(1);
		return -EIO;
	}
}
/*
 * iomap_dio_rw() always completes O_[D]SYNC writes regardless of whether the IO
 * is being issued as AIO or not. This allows us to optimise pure data writes
 * to use REQ_FUA rather than requiring generic_write_sync() to issue a
 * REQ_FLUSH post write. This is slightly tricky because a single request here
 * can be mapped into multiple disjoint IOs and only a subset of the IOs issued
 * may be pure data writes. In that case, we still need to do a full data sync
 * completion.
 */
ssize_t
iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops, iomap_dio_end_io_t end_io)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = file_inode(iocb->ki_filp);
	size_t count = iov_iter_count(iter);
	loff_t pos = iocb->ki_pos, start = pos;
	loff_t end = iocb->ki_pos + count - 1, ret = 0;
	unsigned int flags = IOMAP_DIRECT;
	bool wait_for_completion = is_sync_kiocb(iocb);
	struct blk_plug plug;
	struct iomap_dio *dio;

	lockdep_assert_held(&inode->i_rwsem);

	if (!count)
		return 0;

	dio = kmalloc(sizeof(*dio), GFP_KERNEL);
	if (!dio)
		return -ENOMEM;

	dio->iocb = iocb;
	atomic_set(&dio->ref, 1);
	dio->size = 0;
	dio->i_size = i_size_read(inode);
	dio->end_io = end_io;
	dio->error = 0;
	dio->flags = 0;

	dio->submit.iter = iter;
	dio->submit.waiter = current;
	dio->submit.cookie = BLK_QC_T_NONE;
	dio->submit.last_queue = NULL;

	if (iov_iter_rw(iter) == READ) {
		if (pos >= dio->i_size)
			goto out_free_dio;

		if (iter_is_iovec(iter) && iov_iter_rw(iter) == READ)
			dio->flags |= IOMAP_DIO_DIRTY;
	} else {
		flags |= IOMAP_WRITE;
		dio->flags |= IOMAP_DIO_WRITE;

		/* for data sync or sync, we need sync completion processing */
		if (iocb->ki_flags & IOCB_DSYNC)
			dio->flags |= IOMAP_DIO_NEED_SYNC;

		/*
		 * For datasync only writes, we optimistically try using FUA for
		 * this IO. Any non-FUA write that occurs will clear this flag,
		 * hence we know before completion whether a cache flush is
		 * necessary.
		 */
		if ((iocb->ki_flags & (IOCB_DSYNC | IOCB_SYNC)) == IOCB_DSYNC)
			dio->flags |= IOMAP_DIO_WRITE_FUA;
	}

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (filemap_range_has_page(mapping, start, end)) {
			ret = -EAGAIN;
			goto out_free_dio;
		}
		flags |= IOMAP_NOWAIT;
	}

	ret = filemap_write_and_wait_range(mapping, start, end);
	if (ret)
		goto out_free_dio;

	/*
	 * Try to invalidate cache pages for the range we're direct
	 * writing. If this invalidation fails, tough, the write will
	 * still work, but racing two incompatible write paths is a
	 * pretty crazy thing to do, so we don't support it 100%.
	 */
	ret = invalidate_inode_pages2_range(mapping,
			start >> PAGE_SHIFT, end >> PAGE_SHIFT);
	if (ret)
		dio_warn_stale_pagecache(iocb->ki_filp);
	ret = 0;

	if (iov_iter_rw(iter) == WRITE && !wait_for_completion &&
	    !inode->i_sb->s_dio_done_wq) {
		ret = sb_init_dio_done_wq(inode->i_sb);
		if (ret < 0)
			goto out_free_dio;
	}

	inode_dio_begin(inode);

	blk_start_plug(&plug);
	do {
		ret = iomap_apply(inode, pos, count, flags, ops, dio,
				iomap_dio_actor);
		if (ret <= 0) {
			/* magic error code to fall back to buffered I/O */
			if (ret == -ENOTBLK) {
				wait_for_completion = true;
				ret = 0;
			}
			break;
		}
		pos += ret;

		if (iov_iter_rw(iter) == READ && pos >= dio->i_size)
			break;
	} while ((count = iov_iter_count(iter)) > 0);
	blk_finish_plug(&plug);

	if (ret < 0)
		iomap_dio_set_error(dio, ret);

	/*
	 * If all the writes we issued were FUA, we don't need to flush the
	 * cache on IO completion. Clear the sync flag for this case.
	 */
	if (dio->flags & IOMAP_DIO_WRITE_FUA)
		dio->flags &= ~IOMAP_DIO_NEED_SYNC;

	WRITE_ONCE(iocb->ki_cookie, dio->submit.cookie);
	WRITE_ONCE(iocb->private, dio->submit.last_queue);

	/*
	 * We are about to drop our additional submission reference, which
	 * might be the last reference to the dio. There are three
	 * different ways we can progress here:
	 *
	 *  (a) If this is the last reference we will always complete and free
	 *	the dio ourselves.
	 *  (b) If this is not the last reference, and we serve an asynchronous
	 *	iocb, we must never touch the dio after the decrement, the
	 *	I/O completion handler will complete and free it.
	 *  (c) If this is not the last reference, but we serve a synchronous
	 *	iocb, the I/O completion handler will wake us up on the drop
	 *	of the final reference, and we will complete and free it here
	 *	after we got woken by the I/O completion handler.
	 */
	dio->wait_for_completion = wait_for_completion;
	if (!atomic_dec_and_test(&dio->ref)) {
		if (!wait_for_completion)
			return -EIOCBQUEUED;

		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (!READ_ONCE(dio->submit.waiter))
				break;

			if (!(iocb->ki_flags & IOCB_HIPRI) ||
			    !dio->submit.last_queue ||
			    !blk_poll(dio->submit.last_queue,
					 dio->submit.cookie, true))
				io_schedule();
		}
		__set_current_state(TASK_RUNNING);
	}

	return iomap_dio_complete(dio);

out_free_dio:
	kfree(dio);
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_dio_rw);
/* Swapfile activation */

#ifdef CONFIG_SWAP
struct iomap_swapfile_info {
	struct iomap iomap;		/* accumulated iomap */
	struct swap_info_struct *sis;
	uint64_t lowest_ppage;		/* lowest physical addr seen (pages) */
	uint64_t highest_ppage;		/* highest physical addr seen (pages) */
	unsigned long nr_pages;		/* number of pages collected */
	int nr_extents;			/* extent count */
};
/*
 * Collect physical extents for this swap file. Physical extents reported to
 * the swap code must be trimmed to align to a page boundary. The logical
 * offset within the file is irrelevant since the swapfile code maps logical
 * page numbers of the swap device to the physical page-aligned extents.
 */
static int iomap_swapfile_add_extent(struct iomap_swapfile_info *isi)
{
	struct iomap *iomap = &isi->iomap;
	unsigned long nr_pages;
	uint64_t first_ppage;
	uint64_t first_ppage_reported;
	uint64_t next_ppage;
	int error;

	/*
	 * Round the start up and the end down so that the physical
	 * extent aligns to a page boundary.
	 */
	first_ppage = ALIGN(iomap->addr, PAGE_SIZE) >> PAGE_SHIFT;
	next_ppage = ALIGN_DOWN(iomap->addr + iomap->length, PAGE_SIZE) >>
			PAGE_SHIFT;

	/* Skip too-short physical extents. */
	if (first_ppage >= next_ppage)
		return 0;
	nr_pages = next_ppage - first_ppage;

	/*
	 * Calculate how much swap space we're adding; the first page contains
	 * the swap header and doesn't count. The mm still wants that first
	 * page fed to add_swap_extent, however.
	 */
	first_ppage_reported = first_ppage;
	if (iomap->offset == 0)
		first_ppage_reported++;
	if (isi->lowest_ppage > first_ppage_reported)
		isi->lowest_ppage = first_ppage_reported;
	if (isi->highest_ppage < (next_ppage - 1))
		isi->highest_ppage = next_ppage - 1;

	/* Add extent, set up for the next call. */
	error = add_swap_extent(isi->sis, isi->nr_pages, nr_pages, first_ppage);
	if (error < 0)
		return error;
	isi->nr_extents += error;
	isi->nr_pages += nr_pages;
	return 0;
}
/*
 * Accumulate iomaps for this swap file. We have to accumulate iomaps because
 * swap only cares about contiguous page-aligned physical extents and makes no
 * distinction between written and unwritten extents.
 */
static loff_t iomap_swapfile_activate_actor(struct inode *inode, loff_t pos,
		loff_t count, void *data, struct iomap *iomap)
{
	struct iomap_swapfile_info *isi = data;
	int error;

	switch (iomap->type) {
	case IOMAP_MAPPED:
	case IOMAP_UNWRITTEN:
		/* Only real or unwritten extents. */
		break;
	case IOMAP_INLINE:
		/* No inline data. */
		pr_err("swapon: file is inline\n");
		return -EINVAL;
	default:
		pr_err("swapon: file has unallocated extents\n");
		return -EINVAL;
	}

	/* No uncommitted metadata or shared blocks. */
	if (iomap->flags & IOMAP_F_DIRTY) {
		pr_err("swapon: file is not committed\n");
		return -EINVAL;
	}
	if (iomap->flags & IOMAP_F_SHARED) {
		pr_err("swapon: file has shared extents\n");
		return -EINVAL;
	}

	/* Only one bdev per swap file. */
	if (iomap->bdev != isi->sis->bdev) {
		pr_err("swapon: file is on multiple devices\n");
		return -EINVAL;
	}

	if (isi->iomap.length == 0) {
		/* No accumulated extent, so just store it. */
		memcpy(&isi->iomap, iomap, sizeof(isi->iomap));
	} else if (isi->iomap.addr + isi->iomap.length == iomap->addr) {
		/* Append this to the accumulated extent. */
		isi->iomap.length += iomap->length;
	} else {
		/* Otherwise, add the retained iomap and store this one. */
		error = iomap_swapfile_add_extent(isi);
		if (error)
			return error;
		memcpy(&isi->iomap, iomap, sizeof(isi->iomap));
	}
	return count;
}
/*
 * Iterate a swap file's iomaps to construct physical extents that can be
 * passed to the swapfile subsystem.
 */
int iomap_swapfile_activate(struct swap_info_struct *sis,
		struct file *swap_file, sector_t *pagespan,
		const struct iomap_ops *ops)
{
	struct iomap_swapfile_info isi = {
		.sis = sis,
		.lowest_ppage = (sector_t)-1ULL,
	};
	struct address_space *mapping = swap_file->f_mapping;
	struct inode *inode = mapping->host;
	loff_t pos = 0;
	loff_t len = ALIGN_DOWN(i_size_read(inode), PAGE_SIZE);
	loff_t ret;

	/*
	 * Persist all file mapping metadata so that we won't have any
	 * IOMAP_F_DIRTY iomaps.
	 */
	ret = vfs_fsync(swap_file, 1);
	if (ret)
		return ret;

	while (len > 0) {
		ret = iomap_apply(inode, pos, len, IOMAP_REPORT,
				ops, &isi, iomap_swapfile_activate_actor);
		if (ret <= 0)
			return ret;

		pos += ret;
		len -= ret;
	}

	if (isi.iomap.length) {
		ret = iomap_swapfile_add_extent(&isi);
		if (ret)
			return ret;
	}

	*pagespan = 1 + isi.highest_ppage - isi.lowest_ppage;
	sis->max = isi.nr_pages;
	sis->pages = isi.nr_pages - 1;
	sis->highest_bit = isi.nr_pages - 1;
	return isi.nr_extents;
}
EXPORT_SYMBOL_GPL(iomap_swapfile_activate);
#endif /* CONFIG_SWAP */
static loff_t
iomap_bmap_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap)
{
	sector_t *bno = data, addr;

	if (iomap->type == IOMAP_MAPPED) {
		addr = (pos - iomap->offset + iomap->addr) >> inode->i_blkbits;
		if (addr > INT_MAX)
			WARN(1, "would truncate bmap result\n");
		else
			*bno = addr;
	}
	return 0;
}
/* legacy ->bmap interface.  0 is the error return (!) */
sector_t
iomap_bmap(struct address_space *mapping, sector_t bno,
		const struct iomap_ops *ops)
{
	struct inode *inode = mapping->host;
	loff_t pos = bno << inode->i_blkbits;
	unsigned blocksize = i_blocksize(inode);

	if (filemap_write_and_wait(mapping))
		return 0;

	bno = 0;
	iomap_apply(inode, pos, blocksize, 0, ops, &bno, iomap_bmap_actor);
	return bno;
}
EXPORT_SYMBOL_GPL(iomap_bmap);