// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (c) 2016-2018 Christoph Hellwig.
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/sched/signal.h>
#include <linux/migrate.h>

#include "../internal.h"
static struct iomap_page *
iomap_page_create(struct inode *inode, struct page *page)
{
	struct iomap_page *iop = to_iomap_page(page);

	if (iop || i_blocksize(inode) == PAGE_SIZE)
		return iop;

	iop = kmalloc(sizeof(*iop), GFP_NOFS | __GFP_NOFAIL);
	atomic_set(&iop->read_count, 0);
	atomic_set(&iop->write_count, 0);
	bitmap_zero(iop->uptodate, PAGE_SIZE / SECTOR_SIZE);

	/*
	 * migrate_page_move_mapping() assumes that pages with private data
	 * have their count elevated by 1.
	 */
	get_page(page);
	set_page_private(page, (unsigned long)iop);
	SetPagePrivate(page);
	return iop;
}
static void
iomap_page_release(struct page *page)
{
	struct iomap_page *iop = to_iomap_page(page);

	if (!iop)
		return;
	WARN_ON_ONCE(atomic_read(&iop->read_count));
	WARN_ON_ONCE(atomic_read(&iop->write_count));
	ClearPagePrivate(page);
	set_page_private(page, 0);
	put_page(page);
	kfree(iop);
}
/*
 * Calculate the range inside the page that we actually need to read.
 */
static void
iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop,
		loff_t *pos, loff_t length, unsigned *offp, unsigned *lenp)
{
	loff_t orig_pos = *pos;
	loff_t isize = i_size_read(inode);
	unsigned block_bits = inode->i_blkbits;
	unsigned block_size = (1 << block_bits);
	unsigned poff = offset_in_page(*pos);
	unsigned plen = min_t(loff_t, PAGE_SIZE - poff, length);
	unsigned first = poff >> block_bits;
	unsigned last = (poff + plen - 1) >> block_bits;

	/*
	 * If the block size is smaller than the page size we need to check the
	 * per-block uptodate status and adjust the offset and length if needed
	 * to avoid reading in already uptodate ranges.
	 */
	if (iop) {
		unsigned int i;

		/* move forward for each leading block marked uptodate */
		for (i = first; i <= last; i++) {
			if (!test_bit(i, iop->uptodate))
				break;
			*pos += block_size;
			poff += block_size;
			plen -= block_size;
			first++;
		}

		/* truncate len if we find any trailing uptodate block(s) */
		for ( ; i <= last; i++) {
			if (test_bit(i, iop->uptodate)) {
				plen -= (last - i + 1) * block_size;
				last = i - 1;
				break;
			}
		}
	}

	/*
	 * If the extent spans the block that contains the i_size we need to
	 * handle both halves separately so that we properly zero data in the
	 * page cache for blocks that are entirely outside of i_size.
	 */
	if (orig_pos <= isize && orig_pos + length > isize) {
		unsigned end = offset_in_page(isize - 1) >> block_bits;

		if (first <= end && last > end)
			plen -= (last - end) * block_size;
	}

	*offp = poff;
	*lenp = plen;
}
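
/*
 * Worked example for the adjustment above: with 1k blocks in a 4k page,
 * suppose blocks 0 and 1 are already uptodate and the caller asks for the
 * whole page (*pos page-aligned, length = 4096).  Then first = 0, last = 3;
 * the leading loop skips blocks 0 and 1, advancing *pos by 2048 and leaving
 * poff = 2048, plen = 2048.  Blocks 2 and 3 are not uptodate, so the
 * trailing loop trims nothing and only bytes [2048, 4096) are read.
 */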
static void
iomap_set_range_uptodate(struct page *page, unsigned off, unsigned len)
{
	struct iomap_page *iop = to_iomap_page(page);
	struct inode *inode = page->mapping->host;
	unsigned first = off >> inode->i_blkbits;
	unsigned last = (off + len - 1) >> inode->i_blkbits;
	unsigned int i;
	bool uptodate = true;

	if (iop) {
		for (i = 0; i < PAGE_SIZE / i_blocksize(inode); i++) {
			if (i >= first && i <= last)
				set_bit(i, iop->uptodate);
			else if (!test_bit(i, iop->uptodate))
				uptodate = false;
		}
	}

	if (uptodate && !PageError(page))
		SetPageUptodate(page);
}
static void
iomap_read_finish(struct iomap_page *iop, struct page *page)
{
	if (!iop || atomic_dec_and_test(&iop->read_count))
		unlock_page(page);
}
static void
iomap_read_page_end_io(struct bio_vec *bvec, int error)
{
	struct page *page = bvec->bv_page;
	struct iomap_page *iop = to_iomap_page(page);

	if (unlikely(error)) {
		ClearPageUptodate(page);
		SetPageError(page);
	} else {
		iomap_set_range_uptodate(page, bvec->bv_offset, bvec->bv_len);
	}

	iomap_read_finish(iop, page);
}
static void
iomap_read_end_io(struct bio *bio)
{
	int error = blk_status_to_errno(bio->bi_status);
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all)
		iomap_read_page_end_io(bvec, error);
	bio_put(bio);
}
struct iomap_readpage_ctx {
	struct page		*cur_page;
	bool			cur_page_in_bio;
	bool			is_readahead;
	struct bio		*bio;
	struct list_head	*pages;
};
static void
iomap_read_inline_data(struct inode *inode, struct page *page,
		struct iomap *iomap)
{
	size_t size = i_size_read(inode);
	void *addr;

	if (PageUptodate(page))
		return;

	BUG_ON(page->index);
	BUG_ON(size > PAGE_SIZE - offset_in_page(iomap->inline_data));

	addr = kmap_atomic(page);
	memcpy(addr, iomap->inline_data, size);
	memset(addr + size, 0, PAGE_SIZE - size);
	kunmap_atomic(addr);
	SetPageUptodate(page);
}
static loff_t
iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct iomap_readpage_ctx *ctx = data;
	struct page *page = ctx->cur_page;
	struct iomap_page *iop = iomap_page_create(inode, page);
	bool same_page = false, is_contig = false;
	loff_t orig_pos = pos;
	unsigned poff, plen;
	sector_t sector;

	if (iomap->type == IOMAP_INLINE) {
		WARN_ON_ONCE(pos);
		iomap_read_inline_data(inode, page, iomap);
		return PAGE_SIZE;
	}

	/* zero post-eof blocks as the page may be mapped */
	iomap_adjust_read_range(inode, iop, &pos, length, &poff, &plen);
	if (plen == 0)
		goto done;

	if (iomap->type != IOMAP_MAPPED || pos >= i_size_read(inode)) {
		zero_user(page, poff, plen);
		iomap_set_range_uptodate(page, poff, plen);
		goto done;
	}

	ctx->cur_page_in_bio = true;

	/*
	 * Try to merge into a previous segment if we can.
	 */
	sector = iomap_sector(iomap, pos);
	if (ctx->bio && bio_end_sector(ctx->bio) == sector)
		is_contig = true;

	if (is_contig &&
	    __bio_try_merge_page(ctx->bio, page, plen, poff, &same_page)) {
		if (!same_page && iop)
			atomic_inc(&iop->read_count);
		goto done;
	}

	/*
	 * If we start a new segment we need to increase the read count, and we
	 * need to do so before submitting any previous full bio to make sure
	 * that we don't prematurely unlock the page.
	 */
	if (iop)
		atomic_inc(&iop->read_count);

	if (!ctx->bio || !is_contig || bio_full(ctx->bio, plen)) {
		gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
		int nr_vecs = (length + PAGE_SIZE - 1) >> PAGE_SHIFT;

		if (ctx->bio)
			submit_bio(ctx->bio);

		if (ctx->is_readahead) /* same as readahead_gfp_mask */
			gfp |= __GFP_NORETRY | __GFP_NOWARN;
		ctx->bio = bio_alloc(gfp, min(BIO_MAX_PAGES, nr_vecs));
		ctx->bio->bi_opf = REQ_OP_READ;
		if (ctx->is_readahead)
			ctx->bio->bi_opf |= REQ_RAHEAD;
		ctx->bio->bi_iter.bi_sector = sector;
		bio_set_dev(ctx->bio, iomap->bdev);
		ctx->bio->bi_end_io = iomap_read_end_io;
	}

	bio_add_page(ctx->bio, page, plen, poff);

done:
	/*
	 * Move the caller beyond our range so that it keeps making progress.
	 * For that we have to include any leading non-uptodate ranges, but
	 * we can skip trailing ones as they will be handled in the next
	 * iteration.
	 */
	return pos - orig_pos + plen;
}
int
iomap_readpage(struct page *page, const struct iomap_ops *ops)
{
	struct iomap_readpage_ctx ctx = { .cur_page = page };
	struct inode *inode = page->mapping->host;
	unsigned poff;
	loff_t ret;

	for (poff = 0; poff < PAGE_SIZE; poff += ret) {
		ret = iomap_apply(inode, page_offset(page) + poff,
				PAGE_SIZE - poff, 0, ops, &ctx,
				iomap_readpage_actor);
		if (ret <= 0) {
			WARN_ON_ONCE(ret == 0);
			SetPageError(page);
			break;
		}
	}

	if (ctx.bio) {
		submit_bio(ctx.bio);
		WARN_ON_ONCE(!ctx.cur_page_in_bio);
	} else {
		WARN_ON_ONCE(ctx.cur_page_in_bio);
		unlock_page(page);
	}

	/*
	 * Just like mpage_readpages and block_read_full_page we always
	 * return 0 and just mark the page as PageError on errors.  This
	 * should be cleaned up all through the stack eventually.
	 */
	return 0;
}
EXPORT_SYMBOL_GPL(iomap_readpage);
static struct page *
iomap_next_page(struct inode *inode, struct list_head *pages, loff_t pos,
		loff_t length, loff_t *done)
{
	while (!list_empty(pages)) {
		struct page *page = lru_to_page(pages);

		if (page_offset(page) >= (u64)pos + length)
			break;

		list_del(&page->lru);
		if (!add_to_page_cache_lru(page, inode->i_mapping, page->index,
				GFP_NOFS))
			return page;

		/*
		 * If we already have a page in the page cache at index we are
		 * done.  Upper layers don't care if it is uptodate after the
		 * readpages call itself as every page gets checked again once
		 * actually needed.
		 */
		*done += PAGE_SIZE;
		put_page(page);
	}

	return NULL;
}
static loff_t
iomap_readpages_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap)
{
	struct iomap_readpage_ctx *ctx = data;
	loff_t done, ret;

	for (done = 0; done < length; done += ret) {
		if (ctx->cur_page && offset_in_page(pos + done) == 0) {
			if (!ctx->cur_page_in_bio)
				unlock_page(ctx->cur_page);
			put_page(ctx->cur_page);
			ctx->cur_page = NULL;
		}
		if (!ctx->cur_page) {
			ctx->cur_page = iomap_next_page(inode, ctx->pages,
					pos, length, &done);
			if (!ctx->cur_page)
				break;
			ctx->cur_page_in_bio = false;
		}
		ret = iomap_readpage_actor(inode, pos + done, length - done,
				ctx, iomap);
	}

	return done;
}
int
iomap_readpages(struct address_space *mapping, struct list_head *pages,
		unsigned nr_pages, const struct iomap_ops *ops)
{
	struct iomap_readpage_ctx ctx = {
		.pages		= pages,
		.is_readahead	= true,
	};
	loff_t pos = page_offset(list_entry(pages->prev, struct page, lru));
	loff_t last = page_offset(list_entry(pages->next, struct page, lru));
	loff_t length = last - pos + PAGE_SIZE, ret = 0;

	while (length > 0) {
		ret = iomap_apply(mapping->host, pos, length, 0, ops,
				&ctx, iomap_readpages_actor);
		if (ret <= 0) {
			WARN_ON_ONCE(ret == 0);
			goto done;
		}
		pos += ret;
		length -= ret;
	}
	ret = 0;
done:
	if (ctx.bio)
		submit_bio(ctx.bio);
	if (ctx.cur_page) {
		if (!ctx.cur_page_in_bio)
			unlock_page(ctx.cur_page);
		put_page(ctx.cur_page);
	}

	/*
	 * Check that we didn't lose a page due to the arcane calling
	 * conventions.
	 */
	WARN_ON_ONCE(!ret && !list_empty(ctx.pages));
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_readpages);
/*
 * iomap_is_partially_uptodate checks whether blocks within a page are
 * uptodate or not.
 *
 * Returns true if all blocks which correspond to the file portion
 * we want to read within the page are uptodate.
 */
int
iomap_is_partially_uptodate(struct page *page, unsigned long from,
		unsigned long count)
{
	struct iomap_page *iop = to_iomap_page(page);
	struct inode *inode = page->mapping->host;
	unsigned len, first, last;
	unsigned i;

	/* Limit range to one page */
	len = min_t(unsigned, PAGE_SIZE - from, count);

	/* First and last blocks in range within page */
	first = from >> inode->i_blkbits;
	last = (from + len - 1) >> inode->i_blkbits;

	if (iop) {
		for (i = first; i <= last; i++)
			if (!test_bit(i, iop->uptodate))
				return 0;
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);
int
iomap_releasepage(struct page *page, gfp_t gfp_mask)
{
	/*
	 * mm accommodates an old ext3 case where clean pages might not have
	 * had the dirty bit cleared.  Thus, it can send actual dirty pages to
	 * ->releasepage() via shrink_active_list(); skip those here.
	 */
	if (PageDirty(page) || PageWriteback(page))
		return 0;
	iomap_page_release(page);
	return 1;
}
EXPORT_SYMBOL_GPL(iomap_releasepage);
void
iomap_invalidatepage(struct page *page, unsigned int offset, unsigned int len)
{
	/*
	 * If we are invalidating the entire page, clear the dirty state from
	 * it and release it to avoid unnecessary buildup of the LRU.
	 */
	if (offset == 0 && len == PAGE_SIZE) {
		WARN_ON_ONCE(PageWriteback(page));
		cancel_dirty_page(page);
		iomap_page_release(page);
	}
}
EXPORT_SYMBOL_GPL(iomap_invalidatepage);
#ifdef CONFIG_MIGRATION
int
iomap_migrate_page(struct address_space *mapping, struct page *newpage,
		struct page *page, enum migrate_mode mode)
{
	int ret;

	ret = migrate_page_move_mapping(mapping, newpage, page, 0);
	if (ret != MIGRATEPAGE_SUCCESS)
		return ret;

	if (page_has_private(page)) {
		ClearPagePrivate(page);
		get_page(newpage);
		set_page_private(newpage, page_private(page));
		set_page_private(page, 0);
		put_page(page);
		SetPagePrivate(newpage);
	}

	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);
	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL_GPL(iomap_migrate_page);
#endif /* CONFIG_MIGRATION */
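
/*
 * Sketch (illustrative only, not part of this file): a filesystem built on
 * the read-side helpers above typically wires them into its
 * address_space_operations roughly as below.  The "myfs" names and
 * myfs_iomap_ops are placeholders.
 */
#if 0	/* example, not compiled */
static int myfs_readpage(struct file *unused, struct page *page)
{
	return iomap_readpage(page, &myfs_iomap_ops);
}

static int myfs_readpages(struct file *unused, struct address_space *mapping,
		struct list_head *pages, unsigned nr_pages)
{
	return iomap_readpages(mapping, pages, nr_pages, &myfs_iomap_ops);
}

static const struct address_space_operations myfs_aops = {
	.readpage		= myfs_readpage,
	.readpages		= myfs_readpages,
	.releasepage		= iomap_releasepage,
	.invalidatepage		= iomap_invalidatepage,
	.is_partially_uptodate	= iomap_is_partially_uptodate,
#ifdef CONFIG_MIGRATION
	.migratepage		= iomap_migrate_page,
#endif
};
#endif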
static void
iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
{
	loff_t i_size = i_size_read(inode);

	/*
	 * Only truncate newly allocated pages beyond EOF, even if the
	 * write started inside the existing inode size.
	 */
	if (pos + len > i_size)
		truncate_pagecache_range(inode, max(pos, i_size), pos + len);
}
static int
iomap_read_page_sync(struct inode *inode, loff_t block_start, struct page *page,
		unsigned poff, unsigned plen, unsigned from, unsigned to,
		struct iomap *iomap)
{
	struct bio_vec bvec;
	struct bio bio;

	if (iomap->type != IOMAP_MAPPED || block_start >= i_size_read(inode)) {
		zero_user_segments(page, poff, from, to, poff + plen);
		iomap_set_range_uptodate(page, poff, plen);
		return 0;
	}

	bio_init(&bio, &bvec, 1);
	bio.bi_opf = REQ_OP_READ;
	bio.bi_iter.bi_sector = iomap_sector(iomap, block_start);
	bio_set_dev(&bio, iomap->bdev);
	__bio_add_page(&bio, page, plen, poff);
	return submit_bio_wait(&bio);
}
static int
__iomap_write_begin(struct inode *inode, loff_t pos, unsigned len,
		struct page *page, struct iomap *iomap)
{
	struct iomap_page *iop = iomap_page_create(inode, page);
	loff_t block_size = i_blocksize(inode);
	loff_t block_start = pos & ~(block_size - 1);
	loff_t block_end = (pos + len + block_size - 1) & ~(block_size - 1);
	unsigned from = offset_in_page(pos), to = from + len, poff, plen;
	int status = 0;

	if (PageUptodate(page))
		return 0;

	do {
		iomap_adjust_read_range(inode, iop, &block_start,
				block_end - block_start, &poff, &plen);
		if (plen == 0)
			break;

		if ((from > poff && from < poff + plen) ||
		    (to > poff && to < poff + plen)) {
			status = iomap_read_page_sync(inode, block_start, page,
					poff, plen, from, to, iomap);
			if (status)
				break;
		}
	} while ((block_start += plen) < block_end);

	return status;
}
static int
iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, struct iomap *iomap)
{
	const struct iomap_page_ops *page_ops = iomap->page_ops;
	pgoff_t index = pos >> PAGE_SHIFT;
	struct page *page;
	int status = 0;

	BUG_ON(pos + len > iomap->offset + iomap->length);

	if (fatal_signal_pending(current))
		return -EINTR;

	if (page_ops && page_ops->page_prepare) {
		status = page_ops->page_prepare(inode, pos, len, iomap);
		if (status)
			return status;
	}

	page = grab_cache_page_write_begin(inode->i_mapping, index, flags);
	if (!page) {
		status = -ENOMEM;
		goto out_no_page;
	}

	if (iomap->type == IOMAP_INLINE)
		iomap_read_inline_data(inode, page, iomap);
	else if (iomap->flags & IOMAP_F_BUFFER_HEAD)
		status = __block_write_begin_int(page, pos, len, NULL, iomap);
	else
		status = __iomap_write_begin(inode, pos, len, page, iomap);

	if (unlikely(status))
		goto out_unlock;

	*pagep = page;
	return 0;

out_unlock:
	unlock_page(page);
	put_page(page);
	iomap_write_failed(inode, pos, len);

out_no_page:
	if (page_ops && page_ops->page_done)
		page_ops->page_done(inode, pos, 0, NULL, iomap);
	return status;
}
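
/*
 * Sketch (illustrative only): the page_prepare/page_done hooks invoked above
 * let a filesystem bracket each page-sized write, e.g. to reserve and release
 * journal credits.  The "myfs" names are placeholders and the prototypes are
 * inferred from the call sites in this file.
 */
#if 0	/* example, not compiled */
static int myfs_page_prepare(struct inode *inode, loff_t pos, unsigned len,
		struct iomap *iomap)
{
	/* e.g. start a transaction sized for @len bytes at @pos */
	return 0;
}

static void myfs_page_done(struct inode *inode, loff_t pos, unsigned copied,
		struct page *page, struct iomap *iomap)
{
	/* e.g. end the transaction; @page may be NULL on failure paths */
}

static const struct iomap_page_ops myfs_page_ops = {
	.page_prepare	= myfs_page_prepare,
	.page_done	= myfs_page_done,
};
#endif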
int
iomap_set_page_dirty(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	int newly_dirty;

	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	/*
	 * Lock out page->mem_cgroup migration to keep PageDirty
	 * synchronized with per-memcg dirty page counters.
	 */
	lock_page_memcg(page);
	newly_dirty = !TestSetPageDirty(page);
	if (newly_dirty)
		__set_page_dirty(page, mapping, 0);
	unlock_page_memcg(page);

	if (newly_dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	return newly_dirty;
}
EXPORT_SYMBOL_GPL(iomap_set_page_dirty);
static int
__iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
		unsigned copied, struct page *page, struct iomap *iomap)
{
	flush_dcache_page(page);

	/*
	 * The blocks that were entirely written will now be uptodate, so we
	 * don't have to worry about a readpage reading them and overwriting a
	 * partial write.  However if we have encountered a short write and
	 * only partially written into a block, it will not be marked uptodate,
	 * so a readpage might come in and destroy our partial write.
	 *
	 * Do the simplest thing, and just treat any short write to a non
	 * uptodate page as a zero-length write, and force the caller to redo
	 * the whole thing.
	 */
	if (unlikely(copied < len && !PageUptodate(page)))
		return 0;
	iomap_set_range_uptodate(page, offset_in_page(pos), len);
	iomap_set_page_dirty(page);
	return copied;
}
static int
iomap_write_end_inline(struct inode *inode, struct page *page,
		struct iomap *iomap, loff_t pos, unsigned copied)
{
	void *addr;

	WARN_ON_ONCE(!PageUptodate(page));
	BUG_ON(pos + copied > PAGE_SIZE - offset_in_page(iomap->inline_data));

	addr = kmap_atomic(page);
	memcpy(iomap->inline_data + pos, addr + pos, copied);
	kunmap_atomic(addr);

	mark_inode_dirty(inode);
	return copied;
}
static int
iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
		unsigned copied, struct page *page, struct iomap *iomap)
{
	const struct iomap_page_ops *page_ops = iomap->page_ops;
	loff_t old_size = inode->i_size;
	int ret;

	if (iomap->type == IOMAP_INLINE) {
		ret = iomap_write_end_inline(inode, page, iomap, pos, copied);
	} else if (iomap->flags & IOMAP_F_BUFFER_HEAD) {
		ret = block_write_end(NULL, inode->i_mapping, pos, len, copied,
				page, NULL);
	} else {
		ret = __iomap_write_end(inode, pos, len, copied, page, iomap);
	}

	/*
	 * Update the in-memory inode size after copying the data into the page
	 * cache.  It's up to the file system to write the updated size to
	 * disk, preferably after I/O completion so that no stale data is
	 * exposed.
	 */
	if (pos + ret > old_size) {
		i_size_write(inode, pos + ret);
		iomap->flags |= IOMAP_F_SIZE_CHANGED;
	}
	unlock_page(page);

	if (old_size < pos)
		pagecache_isize_extended(inode, old_size, pos);
	if (page_ops && page_ops->page_done)
		page_ops->page_done(inode, pos, ret, page, iomap);
	put_page(page);

	if (ret < len)
		iomap_write_failed(inode, pos, len);
	return ret;
}
static loff_t
iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct iov_iter *i = data;
	long status = 0;
	ssize_t written = 0;
	unsigned int flags = AOP_FLAG_NOFS;

	do {
		struct page *page;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */
		size_t copied;		/* Bytes copied from user */

		offset = offset_in_page(pos);
		bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_count(i));
again:
		if (bytes > length)
			bytes = length;

		/*
		 * Bring in the user page that we will copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 *
		 * Not only is this an optimisation, but it is also required
		 * to check that the address is actually valid, when atomic
		 * usercopies are used, below.
		 */
		if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
			status = -EFAULT;
			break;
		}

		status = iomap_write_begin(inode, pos, bytes, flags, &page,
				iomap);
		if (unlikely(status))
			break;

		if (mapping_writably_mapped(inode->i_mapping))
			flush_dcache_page(page);

		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);

		flush_dcache_page(page);

		status = iomap_write_end(inode, pos, bytes, copied, page,
				iomap);
		if (unlikely(status < 0))
			break;
		copied = status;

		cond_resched();

		iov_iter_advance(i, copied);
		if (unlikely(copied == 0)) {
			/*
			 * If we were unable to copy any data at all, we must
			 * fall back to a single segment length write.
			 *
			 * If we didn't fallback here, we could livelock
			 * because not all segments in the iov can be copied at
			 * once without a pagefault.
			 */
			bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_single_seg_count(i));
			goto again;
		}
		pos += copied;
		written += copied;
		length -= copied;

		balance_dirty_pages_ratelimited(inode->i_mapping);
	} while (iov_iter_count(i) && length);

	return written ? written : status;
}
ssize_t
iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	loff_t pos = iocb->ki_pos, ret = 0, written = 0;

	while (iov_iter_count(iter)) {
		ret = iomap_apply(inode, pos, iov_iter_count(iter),
				IOMAP_WRITE, ops, iter, iomap_write_actor);
		if (ret <= 0)
			break;
		pos += ret;
		written += ret;
	}

	return written ? written : ret;
}
EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
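
/*
 * Sketch (illustrative only): a typical ->write_iter caller driving
 * iomap_file_buffered_write(), modelled loosely on how filesystems such as
 * XFS use it.  The "myfs" names are placeholders; note the helper does not
 * advance iocb->ki_pos itself.
 */
#if 0	/* example, not compiled */
static ssize_t myfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	inode_lock(inode);
	ret = generic_write_checks(iocb, from);
	if (ret > 0)
		ret = iomap_file_buffered_write(iocb, from, &myfs_iomap_ops);
	inode_unlock(inode);

	if (ret > 0) {
		iocb->ki_pos += ret;
		ret = generic_write_sync(iocb, ret);
	}
	return ret;
}
#endif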
static struct page *
__iomap_read_page(struct inode *inode, loff_t offset)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	page = read_mapping_page(mapping, offset >> PAGE_SHIFT, NULL);
	if (IS_ERR(page))
		return page;
	if (!PageUptodate(page)) {
		put_page(page);
		return ERR_PTR(-EIO);
	}
	return page;
}
static loff_t
iomap_dirty_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	long status = 0;
	ssize_t written = 0;

	do {
		struct page *page, *rpage;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */

		offset = offset_in_page(pos);
		bytes = min_t(loff_t, PAGE_SIZE - offset, length);

		rpage = __iomap_read_page(inode, pos);
		if (IS_ERR(rpage))
			return PTR_ERR(rpage);

		status = iomap_write_begin(inode, pos, bytes,
					   AOP_FLAG_NOFS, &page, iomap);
		put_page(rpage);
		if (unlikely(status))
			return status;

		WARN_ON_ONCE(!PageUptodate(page));

		status = iomap_write_end(inode, pos, bytes, bytes, page, iomap);
		if (unlikely(status <= 0)) {
			if (WARN_ON_ONCE(status == 0))
				return -EIO;
			return status;
		}

		cond_resched();

		pos += status;
		written += status;
		length -= status;

		balance_dirty_pages_ratelimited(inode->i_mapping);
	} while (length);

	return written;
}
int
iomap_file_dirty(struct inode *inode, loff_t pos, loff_t len,
		const struct iomap_ops *ops)
{
	loff_t ret;

	while (len) {
		ret = iomap_apply(inode, pos, len, IOMAP_WRITE, ops, NULL,
				iomap_dirty_actor);
		if (ret <= 0)
			return ret;
		pos += ret;
		len -= ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_file_dirty);
static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset,
		unsigned bytes, struct iomap *iomap)
{
	struct page *page;
	int status;

	status = iomap_write_begin(inode, pos, bytes, AOP_FLAG_NOFS, &page,
				   iomap);
	if (status)
		return status;

	zero_user(page, offset, bytes);
	mark_page_accessed(page);

	return iomap_write_end(inode, pos, bytes, bytes, page, iomap);
}
static int iomap_dax_zero(loff_t pos, unsigned offset, unsigned bytes,
		struct iomap *iomap)
{
	return __dax_zero_page_range(iomap->bdev, iomap->dax_dev,
			iomap_sector(iomap, pos & PAGE_MASK), offset, bytes);
}
static loff_t
iomap_zero_range_actor(struct inode *inode, loff_t pos, loff_t count,
		void *data, struct iomap *iomap)
{
	bool *did_zero = data;
	loff_t written = 0;
	int status;

	/* already zeroed?  we're done. */
	if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
		return count;

	do {
		unsigned offset, bytes;

		offset = offset_in_page(pos);
		bytes = min_t(loff_t, PAGE_SIZE - offset, count);

		if (IS_DAX(inode))
			status = iomap_dax_zero(pos, offset, bytes, iomap);
		else
			status = iomap_zero(inode, pos, offset, bytes, iomap);
		if (status < 0)
			return status;

		pos += bytes;
		count -= bytes;
		written += bytes;
		if (did_zero)
			*did_zero = true;
	} while (count > 0);

	return written;
}
int
iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
		const struct iomap_ops *ops)
{
	loff_t ret;

	while (len > 0) {
		ret = iomap_apply(inode, pos, len, IOMAP_ZERO,
				ops, did_zero, iomap_zero_range_actor);
		if (ret <= 0)
			return ret;

		pos += ret;
		len -= ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_zero_range);
int
iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
		const struct iomap_ops *ops)
{
	unsigned int blocksize = i_blocksize(inode);
	unsigned int off = pos & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!off)
		return 0;
	return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
}
EXPORT_SYMBOL_GPL(iomap_truncate_page);
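
/*
 * Sketch (illustrative only): a truncate path would typically call
 * iomap_truncate_page() to zero the partial block at the new EOF before
 * shrinking i_size, so stale post-EOF data is never exposed.  The "myfs"
 * names are placeholders.
 */
#if 0	/* example, not compiled */
static int myfs_truncate(struct inode *inode, loff_t newsize)
{
	bool did_zero = false;
	int error;

	error = iomap_truncate_page(inode, newsize, &did_zero,
			&myfs_iomap_ops);
	if (error)
		return error;

	truncate_setsize(inode, newsize);
	return 0;
}
#endif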
static loff_t
iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap)
{
	struct page *page = data;
	int ret;

	if (iomap->flags & IOMAP_F_BUFFER_HEAD) {
		ret = __block_write_begin_int(page, pos, length, NULL, iomap);
		if (ret)
			return ret;
		block_commit_write(page, 0, length);
	} else {
		WARN_ON_ONCE(!PageUptodate(page));
		iomap_page_create(inode, page);
		set_page_dirty(page);
	}

	return length;
}
iomap_page_mkwrite(struct vm_fault
*vmf
, const struct iomap_ops
*ops
)
1035 struct page
*page
= vmf
->page
;
1036 struct inode
*inode
= file_inode(vmf
->vma
->vm_file
);
1037 unsigned long length
;
1038 loff_t offset
, size
;
1042 size
= i_size_read(inode
);
1043 if ((page
->mapping
!= inode
->i_mapping
) ||
1044 (page_offset(page
) > size
)) {
1045 /* We overload EFAULT to mean page got truncated */
1050 /* page is wholly or partially inside EOF */
1051 if (((page
->index
+ 1) << PAGE_SHIFT
) > size
)
1052 length
= offset_in_page(size
);
1056 offset
= page_offset(page
);
1057 while (length
> 0) {
1058 ret
= iomap_apply(inode
, offset
, length
,
1059 IOMAP_WRITE
| IOMAP_FAULT
, ops
, page
,
1060 iomap_page_mkwrite_actor
);
1061 if (unlikely(ret
<= 0))
1067 wait_for_stable_page(page
);
1068 return VM_FAULT_LOCKED
;
1071 return block_page_mkwrite_return(ret
);
1073 EXPORT_SYMBOL_GPL(iomap_page_mkwrite
);
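
/*
 * Sketch (illustrative only): wiring iomap_page_mkwrite() into a
 * vm_operations_struct, modelled loosely on callers such as XFS.  The
 * "myfs" names are placeholders.
 */
#if 0	/* example, not compiled */
static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	vm_fault_t ret;

	sb_start_pagefault(inode->i_sb);
	file_update_time(vmf->vma->vm_file);
	ret = iomap_page_mkwrite(vmf, &myfs_iomap_ops);
	sb_end_pagefault(inode->i_sb);

	return ret;
}

static const struct vm_operations_struct myfs_file_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= myfs_page_mkwrite,
};
#endif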