// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/swap.h>
#include <linux/prefetch.h>
#include <linux/uio.h>
#include <linux/cleancache.h>
#include <linux/sched/signal.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "trace.h"
#include <trace/events/f2fs.h>
#define NUM_PREALLOC_POST_READ_CTXS	128

static struct kmem_cache *bio_post_read_ctx_cache;
static mempool_t *bio_post_read_ctx_pool;
static bool __is_cp_guaranteed(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode;
	struct f2fs_sb_info *sbi;

	if (!mapping)
		return false;

	inode = mapping->host;
	sbi = F2FS_I_SB(inode);

	if (inode->i_ino == F2FS_META_INO(sbi) ||
			inode->i_ino == F2FS_NODE_INO(sbi) ||
			S_ISDIR(inode->i_mode) ||
			(S_ISREG(inode->i_mode) &&
			(f2fs_is_atomic_file(inode) || IS_NOQUOTA(inode))) ||
			is_cold_data(page))
		return true;
	return false;
}
static enum count_type __read_io_type(struct page *page)
{
	struct address_space *mapping = page_file_mapping(page);

	if (mapping) {
		struct inode *inode = mapping->host;
		struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

		if (inode->i_ino == F2FS_META_INO(sbi))
			return F2FS_RD_META;

		if (inode->i_ino == F2FS_NODE_INO(sbi))
			return F2FS_RD_NODE;
	}
	return F2FS_RD_DATA;
}
/* postprocessing steps for read bios */
enum bio_post_read_step {
	STEP_INITIAL = 0,
	STEP_DECRYPT,
};

struct bio_post_read_ctx {
	struct bio *bio;
	struct work_struct work;
	unsigned int cur_step;
	unsigned int enabled_steps;
};
static void __read_end_io(struct bio *bio)
{
	struct page *page;
	struct bio_vec *bv;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bv, bio, iter_all) {
		page = bv->bv_page;

		/* PG_error was set if any post_read step failed */
		if (bio->bi_status || PageError(page)) {
			ClearPageUptodate(page);
			/* will re-read again later */
			ClearPageError(page);
		} else {
			SetPageUptodate(page);
		}
		dec_page_count(F2FS_P_SB(page), __read_io_type(page));
		unlock_page(page);
	}
	if (bio->bi_private)
		mempool_free(bio->bi_private, bio_post_read_ctx_pool);
	bio_put(bio);
}
static void bio_post_read_processing(struct bio_post_read_ctx *ctx);
static void decrypt_work(struct work_struct *work)
{
	struct bio_post_read_ctx *ctx =
		container_of(work, struct bio_post_read_ctx, work);

	fscrypt_decrypt_bio(ctx->bio);

	bio_post_read_processing(ctx);
}
static void bio_post_read_processing(struct bio_post_read_ctx *ctx)
{
	switch (++ctx->cur_step) {
	case STEP_DECRYPT:
		if (ctx->enabled_steps & (1 << STEP_DECRYPT)) {
			INIT_WORK(&ctx->work, decrypt_work);
			fscrypt_enqueue_decrypt_work(&ctx->work);
			return;
		}
		ctx->cur_step++;
		/* fall-through */
	default:
		__read_end_io(ctx->bio);
	}
}
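/*
 * Editor's note (illustrative sketch, not kernel source): each bit in
 * ctx->enabled_steps selects one postprocessing stage, and cur_step walks
 * the stages in enum order until __read_end_io() finally unlocks the pages.
 * A hypothetical extra stage, say STEP_VERIFY, would follow the same shape:
 *
 *	case STEP_VERIFY:		// assumed extra enum value
 *		if (ctx->enabled_steps & (1 << STEP_VERIFY)) {
 *			INIT_WORK(&ctx->work, verify_work);
 *			// enqueue the work and return
 *		}
 *		ctx->cur_step++;
 *		// fall through to the next step
 */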
static bool f2fs_bio_post_read_required(struct bio *bio)
{
	return bio->bi_private && !bio->bi_status;
}
static void f2fs_read_end_io(struct bio *bio)
{
	if (time_to_inject(F2FS_P_SB(bio_first_page_all(bio)),
						FAULT_READ_IO)) {
		f2fs_show_injection_info(FAULT_READ_IO);
		bio->bi_status = BLK_STS_IOERR;
	}

	if (f2fs_bio_post_read_required(bio)) {
		struct bio_post_read_ctx *ctx = bio->bi_private;

		ctx->cur_step = STEP_INITIAL;
		bio_post_read_processing(ctx);
		return;
	}

	__read_end_io(bio);
}
static void f2fs_write_end_io(struct bio *bio)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	if (time_to_inject(sbi, FAULT_WRITE_IO)) {
		f2fs_show_injection_info(FAULT_WRITE_IO);
		bio->bi_status = BLK_STS_IOERR;
	}

	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct page *page = bvec->bv_page;
		enum count_type type = WB_DATA_TYPE(page);

		if (IS_DUMMY_WRITTEN_PAGE(page)) {
			set_page_private(page, (unsigned long)NULL);
			ClearPagePrivate(page);
			unlock_page(page);
			mempool_free(page, sbi->write_io_dummy);

			if (unlikely(bio->bi_status))
				f2fs_stop_checkpoint(sbi, true);
			continue;
		}

		fscrypt_finalize_bounce_page(&page);

		if (unlikely(bio->bi_status)) {
			mapping_set_error(page->mapping, -EIO);
			if (type == F2FS_WB_CP_DATA)
				f2fs_stop_checkpoint(sbi, true);
		}

		f2fs_bug_on(sbi, page->mapping == NODE_MAPPING(sbi) &&
					page->index != nid_of_node(page));

		dec_page_count(sbi, type);
		if (f2fs_in_warm_node_list(sbi, page))
			f2fs_del_fsync_node_entry(sbi, page);
		clear_cold_data(page);
		end_page_writeback(page);
	}
	if (!get_pages(sbi, F2FS_WB_CP_DATA) &&
				wq_has_sleeper(&sbi->cp_wait))
		wake_up(&sbi->cp_wait);

	bio_put(bio);
}
/*
 * Return the block device that hosts @blk_addr; if @bio is given, also
 * point it at that device and start sector.
 */
struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
				block_t blk_addr, struct bio *bio)
{
	struct block_device *bdev = sbi->sb->s_bdev;
	int i;

	if (f2fs_is_multi_device(sbi)) {
		for (i = 0; i < sbi->s_ndevs; i++) {
			if (FDEV(i).start_blk <= blk_addr &&
					FDEV(i).end_blk >= blk_addr) {
				blk_addr -= FDEV(i).start_blk;
				bdev = FDEV(i).bdev;
				break;
			}
		}
	}
	if (bio) {
		bio_set_dev(bio, bdev);
		bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
	}
	return bdev;
}
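/*
 * Editor's note (illustrative, not kernel source): on a multi-device f2fs,
 * FDEV(i).start_blk/end_blk partition one global block address space. For
 * example, if device 0 covers blocks 0..0xFFFF and device 1 covers
 * 0x10000..0x1FFFF, then blk_addr 0x10004 resolves to device 1 at local
 * block 0x4, i.e. sector SECTOR_FROM_BLOCK(0x4) on that bdev.
 */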
int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	int i;

	if (!f2fs_is_multi_device(sbi))
		return 0;

	for (i = 0; i < sbi->s_ndevs; i++)
		if (FDEV(i).start_blk <= blkaddr && FDEV(i).end_blk >= blkaddr)
			return i;
	return 0;
}
static bool __same_bdev(struct f2fs_sb_info *sbi,
				block_t blk_addr, struct bio *bio)
{
	struct block_device *b = f2fs_target_device(sbi, blk_addr, NULL);
	return bio->bi_disk == b->bd_disk && bio->bi_partno == b->bd_partno;
}
/*
 * Low-level block read/write IO operations.
 */
static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
				struct writeback_control *wbc,
				int npages, bool is_read,
				enum page_type type, enum temp_type temp)
{
	struct bio *bio;

	bio = f2fs_bio_alloc(sbi, npages, true);

	f2fs_target_device(sbi, blk_addr, bio);
	if (is_read) {
		bio->bi_end_io = f2fs_read_end_io;
		bio->bi_private = NULL;
	} else {
		bio->bi_end_io = f2fs_write_end_io;
		bio->bi_private = sbi;
		bio->bi_write_hint = f2fs_io_type_to_rw_hint(sbi, type, temp);
	}
	if (wbc)
		wbc_init_bio(wbc, bio);

	return bio;
}
static inline void __submit_bio(struct f2fs_sb_info *sbi,
				struct bio *bio, enum page_type type)
{
	if (!is_read_io(bio_op(bio))) {
		unsigned int start;

		if (type != DATA && type != NODE)
			goto submit_io;

		if (test_opt(sbi, LFS) && current->plug)
			blk_finish_plug(current->plug);

		start = bio->bi_iter.bi_size >> F2FS_BLKSIZE_BITS;
		start %= F2FS_IO_SIZE(sbi);

		if (start == 0)
			goto submit_io;

		/* fill dummy pages */
		for (; start < F2FS_IO_SIZE(sbi); start++) {
			struct page *page =
				mempool_alloc(sbi->write_io_dummy,
					      GFP_NOIO | __GFP_NOFAIL);
			f2fs_bug_on(sbi, !page);

			zero_user_segment(page, 0, PAGE_SIZE);
			SetPagePrivate(page);
			set_page_private(page, (unsigned long)DUMMY_WRITTEN_PAGE);
			lock_page(page);
			if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE)
				f2fs_bug_on(sbi, 1);
		}
		/*
		 * In the NODE case, we lose the next block address chain.
		 * So, we need to do a checkpoint in f2fs_sync_file.
		 */
		if (type == NODE)
			set_sbi_flag(sbi, SBI_NEED_CP);
	}
submit_io:
	if (is_read_io(bio_op(bio)))
		trace_f2fs_submit_read_bio(sbi->sb, type, bio);
	else
		trace_f2fs_submit_write_bio(sbi->sb, type, bio);
	submit_bio(bio);
}
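/*
 * Editor's note (illustrative, not kernel source): the dummy-page loop above
 * rounds a DATA/NODE write bio up to a multiple of F2FS_IO_SIZE(sbi). E.g.
 * with an 8-block IO size and a bio currently holding 5 pages, start == 5
 * and three DUMMY_WRITTEN_PAGEs are appended, so the device always sees
 * aligned 8-block writes; f2fs_write_end_io() later frees the dummies back
 * to sbi->write_io_dummy.
 */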
static void __submit_merged_bio(struct f2fs_bio_info *io)
{
	struct f2fs_io_info *fio = &io->fio;

	if (!io->bio)
		return;

	bio_set_op_attrs(io->bio, fio->op, fio->op_flags);

	if (is_read_io(fio->op))
		trace_f2fs_prepare_read_bio(io->sbi->sb, fio->type, io->bio);
	else
		trace_f2fs_prepare_write_bio(io->sbi->sb, fio->type, io->bio);

	__submit_bio(io->sbi, io->bio, fio->type);
	io->bio = NULL;
}
static bool __has_merged_page(struct bio *bio, struct inode *inode,
						struct page *page, nid_t ino)
{
	struct bio_vec *bvec;
	struct page *target;
	struct bvec_iter_all iter_all;

	if (!bio)
		return false;

	if (!inode && !page && !ino)
		return true;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		target = bvec->bv_page;
		if (fscrypt_is_bounce_page(target))
			target = fscrypt_pagecache_page(target);

		if (inode && inode == target->mapping->host)
			return true;
		if (page && page == target)
			return true;
		if (ino && ino == ino_of_node(target))
			return true;
	}

	return false;
}
static void __f2fs_submit_merged_write(struct f2fs_sb_info *sbi,
				enum page_type type, enum temp_type temp)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io = sbi->write_io[btype] + temp;

	down_write(&io->io_rwsem);

	/* change META to META_FLUSH in the checkpoint procedure */
	if (type >= META_FLUSH) {
		io->fio.type = META_FLUSH;
		io->fio.op = REQ_OP_WRITE;
		io->fio.op_flags = REQ_META | REQ_PRIO | REQ_SYNC;
		if (!test_opt(sbi, NOBARRIER))
			io->fio.op_flags |= REQ_PREFLUSH | REQ_FUA;
	}
	__submit_merged_bio(io);
	up_write(&io->io_rwsem);
}
static void __submit_merged_write_cond(struct f2fs_sb_info *sbi,
				struct inode *inode, struct page *page,
				nid_t ino, enum page_type type, bool force)
{
	enum temp_type temp;
	bool ret = true;

	for (temp = HOT; temp < NR_TEMP_TYPE; temp++) {
		if (!force) {
			enum page_type btype = PAGE_TYPE_OF_BIO(type);
			struct f2fs_bio_info *io = sbi->write_io[btype] + temp;

			down_read(&io->io_rwsem);
			ret = __has_merged_page(io->bio, inode, page, ino);
			up_read(&io->io_rwsem);
		}
		if (ret)
			__f2fs_submit_merged_write(sbi, type, temp);

		/* TODO: use HOT temp only for meta pages now. */
		if (type >= META)
			break;
	}
}
void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type)
{
	__submit_merged_write_cond(sbi, NULL, NULL, 0, type, true);
}
void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
				struct inode *inode, struct page *page,
				nid_t ino, enum page_type type)
{
	__submit_merged_write_cond(sbi, inode, page, ino, type, false);
}
void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi)
{
	f2fs_submit_merged_write(sbi, DATA);
	f2fs_submit_merged_write(sbi, NODE);
	f2fs_submit_merged_write(sbi, META);
}
/*
 * Fill the locked page with data located at the block address.
 * A caller needs to unlock the page on failure.
 */
int f2fs_submit_page_bio(struct f2fs_io_info *fio)
{
	struct bio *bio;
	struct page *page = fio->encrypted_page ?
			fio->encrypted_page : fio->page;

	if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
			fio->is_por ? META_POR : (__is_meta_io(fio) ?
			META_GENERIC : DATA_GENERIC_ENHANCE)))
		return -EFSCORRUPTED;

	trace_f2fs_submit_page_bio(page, fio);
	f2fs_trace_ios(fio, 0);

	/* Allocate a new bio */
	bio = __bio_alloc(fio->sbi, fio->new_blkaddr, fio->io_wbc,
				1, is_read_io(fio->op), fio->type, fio->temp);

	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		bio_put(bio);
		return -EFAULT;
	}

	if (fio->io_wbc && !is_read_io(fio->op))
		wbc_account_cgroup_owner(fio->io_wbc, page, PAGE_SIZE);

	bio_set_op_attrs(bio, fio->op, fio->op_flags);

	inc_page_count(fio->sbi, is_read_io(fio->op) ?
			__read_io_type(page) : WB_DATA_TYPE(fio->page));

	__submit_bio(fio->sbi, bio, fio->type);
	return 0;
}
int f2fs_merge_page_bio(struct f2fs_io_info *fio)
{
	struct bio *bio = *fio->bio;
	struct page *page = fio->encrypted_page ?
			fio->encrypted_page : fio->page;

	if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
			__is_meta_io(fio) ? META_GENERIC : DATA_GENERIC))
		return -EFSCORRUPTED;

	trace_f2fs_submit_page_bio(page, fio);
	f2fs_trace_ios(fio, 0);

	if (bio && (*fio->last_block + 1 != fio->new_blkaddr ||
			!__same_bdev(fio->sbi, fio->new_blkaddr, bio))) {
		__submit_bio(fio->sbi, bio, fio->type);
		bio = NULL;
	}
alloc_new:
	if (!bio) {
		bio = __bio_alloc(fio->sbi, fio->new_blkaddr, fio->io_wbc,
				BIO_MAX_PAGES, false, fio->type, fio->temp);
		bio_set_op_attrs(bio, fio->op, fio->op_flags);
	}

	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		__submit_bio(fio->sbi, bio, fio->type);
		bio = NULL;
		goto alloc_new;
	}

	if (fio->io_wbc)
		wbc_account_cgroup_owner(fio->io_wbc, page, PAGE_SIZE);

	inc_page_count(fio->sbi, WB_DATA_TYPE(page));

	*fio->last_block = fio->new_blkaddr;
	*fio->bio = bio;

	return 0;
}
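/*
 * Editor's note (illustrative, not kernel source): a cached bio is reused
 * only while writes stay physically contiguous on one device, i.e.
 * *fio->last_block + 1 == fio->new_blkaddr and __same_bdev() holds; any
 * discontinuity forces a submit and a fresh bio. Callers therefore issue
 * ascending block addresses to get large merged writes.
 */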
static void f2fs_submit_ipu_bio(struct f2fs_sb_info *sbi, struct bio **bio,
							struct page *page)
{
	if (!bio)
		return;

	if (!__has_merged_page(*bio, NULL, page, 0))
		return;

	__submit_bio(sbi, *bio, DATA);
	*bio = NULL;
}
void f2fs_submit_page_write(struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
	struct f2fs_bio_info *io = sbi->write_io[btype] + fio->temp;
	struct page *bio_page;

	f2fs_bug_on(sbi, is_read_io(fio->op));

	down_write(&io->io_rwsem);
next:
	if (fio->in_list) {
		spin_lock(&io->io_lock);
		if (list_empty(&io->io_list)) {
			spin_unlock(&io->io_lock);
			fio->in_list = false;
			goto out;
		}
		fio = list_first_entry(&io->io_list,
						struct f2fs_io_info, list);
		list_del(&fio->list);
		spin_unlock(&io->io_lock);
	}

	verify_fio_blkaddr(fio);

	bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;

	/* set submitted = true as a return value */
	fio->submitted = true;

	inc_page_count(sbi, WB_DATA_TYPE(bio_page));

	if (io->bio && (io->last_block_in_bio != fio->new_blkaddr - 1 ||
	    (io->fio.op != fio->op || io->fio.op_flags != fio->op_flags) ||
			!__same_bdev(sbi, fio->new_blkaddr, io->bio)))
		__submit_merged_bio(io);
alloc_new:
	if (io->bio == NULL) {
		if ((fio->type == DATA || fio->type == NODE) &&
				fio->new_blkaddr & F2FS_IO_SIZE_MASK(sbi)) {
			dec_page_count(sbi, WB_DATA_TYPE(bio_page));
			fio->retry = true;
			goto skip;
		}
		io->bio = __bio_alloc(sbi, fio->new_blkaddr, fio->io_wbc,
						BIO_MAX_PAGES, false,
						fio->type, fio->temp);
		io->fio = *fio;
	}

	if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) < PAGE_SIZE) {
		__submit_merged_bio(io);
		goto alloc_new;
	}

	if (fio->io_wbc)
		wbc_account_cgroup_owner(fio->io_wbc, bio_page, PAGE_SIZE);

	io->last_block_in_bio = fio->new_blkaddr;
	f2fs_trace_ios(fio, 0);

	trace_f2fs_submit_page_write(fio->page, fio);
skip:
	if (fio->in_list)
		goto next;
out:
	if (is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN) ||
				f2fs_is_checkpoint_ready(sbi))
		__submit_merged_bio(io);
	up_write(&io->io_rwsem);
}
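/*
 * Editor's note (illustrative, not kernel source): when fio->in_list is set
 * (checkpoint-time data), f2fs_submit_page_write() drains io->io_list under
 * io->io_lock and keeps looping via the "next" label, so one caller can
 * flush every queued f2fs_io_info of that temperature in a single pass.
 */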
static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
					unsigned nr_pages, unsigned op_flag)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct bio *bio;
	struct bio_post_read_ctx *ctx;
	unsigned int post_read_steps = 0;

	bio = f2fs_bio_alloc(sbi, min_t(int, nr_pages, BIO_MAX_PAGES), false);
	if (!bio)
		return ERR_PTR(-ENOMEM);
	f2fs_target_device(sbi, blkaddr, bio);
	bio->bi_end_io = f2fs_read_end_io;
	bio_set_op_attrs(bio, REQ_OP_READ, op_flag);

	if (f2fs_encrypted_file(inode))
		post_read_steps |= 1 << STEP_DECRYPT;
	if (post_read_steps) {
		ctx = mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);
		if (!ctx) {
			bio_put(bio);
			return ERR_PTR(-ENOMEM);
		}
		ctx->bio = bio;
		ctx->enabled_steps = post_read_steps;
		bio->bi_private = ctx;
	}

	return bio;
}
/* This can handle encryption stuff */
static int f2fs_submit_page_read(struct inode *inode, struct page *page,
							block_t blkaddr)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct bio *bio;

	bio = f2fs_grab_read_bio(inode, blkaddr, 1, 0);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	/* wait for GCed page writeback via META_MAPPING */
	f2fs_wait_on_block_writeback(inode, blkaddr);

	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		bio_put(bio);
		return -EFAULT;
	}
	ClearPageError(page);
	inc_page_count(sbi, F2FS_RD_DATA);
	__submit_bio(sbi, bio, DATA);
	return 0;
}
static void __set_data_blkaddr(struct dnode_of_data *dn)
{
	struct f2fs_node *rn = F2FS_NODE(dn->node_page);
	__le32 *addr_array;
	int base = 0;

	if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
		base = get_extra_isize(dn->inode);

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[base + dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
}
/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
void f2fs_set_data_blkaddr(struct dnode_of_data *dn)
{
	f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true);
	__set_data_blkaddr(dn);
	if (set_page_dirty(dn->node_page))
		dn->node_changed = true;
}
void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
{
	dn->data_blkaddr = blkaddr;
	f2fs_set_data_blkaddr(dn);
	f2fs_update_extent_cache(dn);
}
/* dn->ofs_in_node will be returned with an up-to-date last block pointer */
int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	int err;

	if (!count)
		return 0;

	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
		return err;

	trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
						dn->ofs_in_node, count);

	f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true);

	for (; count > 0; dn->ofs_in_node++) {
		block_t blkaddr = datablock_addr(dn->inode,
					dn->node_page, dn->ofs_in_node);
		if (blkaddr == NULL_ADDR) {
			dn->data_blkaddr = NEW_ADDR;
			__set_data_blkaddr(dn);
			count--;
		}
	}

	if (set_page_dirty(dn->node_page))
		dn->node_changed = true;
	return 0;
}
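/*
 * Editor's note (illustrative, not kernel source): f2fs_reserve_new_blocks()
 * stamps NEW_ADDR into up to @count NULL_ADDR slots of the current dnode
 * page, advancing dn->ofs_in_node as it scans. Reserving 3 blocks starting
 * at ofs_in_node 10 therefore leaves ofs_in_node just past the last slot it
 * examined, which f2fs_map_blocks() relies on for batched preallocation.
 */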
/* Should keep dn->ofs_in_node unchanged */
int f2fs_reserve_new_block(struct dnode_of_data *dn)
{
	unsigned int ofs_in_node = dn->ofs_in_node;
	int ret;

	ret = f2fs_reserve_new_blocks(dn, 1);
	dn->ofs_in_node = ofs_in_node;
	return ret;
}
int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
	bool need_put = dn->inode_page ? false : true;
	int err;

	err = f2fs_get_dnode_of_data(dn, index, ALLOC_NODE);
	if (err)
		return err;

	if (dn->data_blkaddr == NULL_ADDR)
		err = f2fs_reserve_new_block(dn);
	if (err || need_put)
		f2fs_put_dnode(dn);
	return err;
}
int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
{
	struct extent_info ei = {0, 0, 0};
	struct inode *inode = dn->inode;

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn->data_blkaddr = ei.blk + index - ei.fofs;
		return 0;
	}

	return f2fs_reserve_block(dn, index);
}
struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
						int op_flags, bool for_write)
{
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	struct extent_info ei = {0, 0, 0};
	int err;

	page = f2fs_grab_cache_page(mapping, index, for_write);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn.data_blkaddr = ei.blk + index - ei.fofs;
		if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), dn.data_blkaddr,
						DATA_GENERIC_ENHANCE_READ)) {
			err = -EFSCORRUPTED;
			goto put_err;
		}
		goto got_it;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		goto put_err;
	f2fs_put_dnode(&dn);

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		err = -ENOENT;
		goto put_err;
	}
	if (dn.data_blkaddr != NEW_ADDR &&
			!f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
						dn.data_blkaddr,
						DATA_GENERIC_ENHANCE)) {
		err = -EFSCORRUPTED;
		goto put_err;
	}
got_it:
	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	/*
	 * A new dentry page is allocated but not able to be written, since its
	 * new inode page couldn't be allocated due to -ENOSPC.
	 * In such a case, its blkaddr can remain NEW_ADDR.
	 * See f2fs_add_link -> f2fs_get_new_data_page ->
	 * f2fs_init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
		if (!PageUptodate(page))
			SetPageUptodate(page);
		unlock_page(page);
		return page;
	}

	err = f2fs_submit_page_read(inode, page, dn.data_blkaddr);
	if (err)
		goto put_err;
	return page;

put_err:
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}
struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);

	page = f2fs_get_read_data_page(inode, index, 0, false);
	if (IS_ERR(page))
		return page;

	if (PageUptodate(page))
		return page;

	wait_on_page_locked(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 0);
		return ERR_PTR(-EIO);
	}
	return page;
}
/*
 * If it tries to access a hole, return an error, because the callers
 * (functions in dir.c and GC) need to be able to tell whether this page
 * exists or not.
 */
struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
							bool for_write)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
repeat:
	page = f2fs_get_read_data_page(inode, index, 0, for_write);
	if (IS_ERR(page))
		return page;

	/* wait for read completion */
	lock_page(page);
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	return page;
}
/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, the caller should grab and release a rwsem by calling f2fs_lock_op()
 * and f2fs_unlock_op().
 * Note that ipage is set only by make_empty_dir, and if any error occurs,
 * ipage should be released by this function.
 */
struct page *f2fs_get_new_data_page(struct inode *inode,
		struct page *ipage, pgoff_t index, bool new_i_size)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;

	page = f2fs_grab_cache_page(mapping, index, true);
	if (!page) {
		/*
		 * before exiting, we should make sure ipage will be released
		 * if any error occurs.
		 */
		f2fs_put_page(ipage, 1);
		return ERR_PTR(-ENOMEM);
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, index);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	if (!ipage)
		f2fs_put_dnode(&dn);

	if (PageUptodate(page))
		goto got_it;

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
		if (!PageUptodate(page))
			SetPageUptodate(page);
	} else {
		f2fs_put_page(page, 1);

		/* if ipage exists, blkaddr should be NEW_ADDR */
		f2fs_bug_on(F2FS_I_SB(inode), ipage);
		page = f2fs_get_lock_data_page(inode, index, true);
		if (IS_ERR(page))
			return page;
	}
got_it:
	if (new_i_size && i_size_read(inode) <
				((loff_t)(index + 1) << PAGE_SHIFT))
		f2fs_i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT));
	return page;
}
static int __allocate_data_block(struct dnode_of_data *dn, int seg_type)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_summary sum;
	struct node_info ni;
	block_t old_blkaddr;
	blkcnt_t count = 1;
	int err;

	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
		return -EPERM;

	err = f2fs_get_node_info(sbi, dn->nid, &ni);
	if (err)
		return err;

	dn->data_blkaddr = datablock_addr(dn->inode,
				dn->node_page, dn->ofs_in_node);
	if (dn->data_blkaddr != NULL_ADDR)
		goto alloc;

	if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
		return err;

alloc:
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
	old_blkaddr = dn->data_blkaddr;
	f2fs_allocate_data_block(sbi, NULL, old_blkaddr, &dn->data_blkaddr,
					&sum, seg_type, NULL, false);
	if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
		invalidate_mapping_pages(META_MAPPING(sbi),
					old_blkaddr, old_blkaddr);
	f2fs_set_data_blkaddr(dn);

	/*
	 * i_size will be updated by direct_IO. Otherwise, we'll get stale
	 * data from an unwritten block via dio_read.
	 */
	return 0;
}
int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct f2fs_map_blocks map;
	int flag;
	int err = 0;
	bool direct_io = iocb->ki_flags & IOCB_DIRECT;

	/* convert inline data for Direct I/O */
	if (direct_io) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}

	if (direct_io && allow_outplace_dio(inode, iocb, from))
		return 0;

	if (is_inode_flag_set(inode, FI_NO_PREALLOC))
		return 0;

	map.m_lblk = F2FS_BLK_ALIGN(iocb->ki_pos);
	map.m_len = F2FS_BYTES_TO_BLK(iocb->ki_pos + iov_iter_count(from));
	if (map.m_len > map.m_lblk)
		map.m_len -= map.m_lblk;
	else
		map.m_len = 0;

	map.m_next_pgofs = NULL;
	map.m_next_extent = NULL;
	map.m_seg_type = NO_CHECK_TYPE;
	map.m_may_create = true;

	if (direct_io) {
		map.m_seg_type = f2fs_rw_hint_to_seg_type(iocb->ki_hint);
		flag = f2fs_force_buffered_io(inode, iocb, from) ?
					F2FS_GET_BLOCK_PRE_AIO :
					F2FS_GET_BLOCK_PRE_DIO;
		goto map_blocks;
	}
	if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA(inode)) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}
	if (f2fs_has_inline_data(inode))
		return err;

	flag = F2FS_GET_BLOCK_PRE_AIO;

map_blocks:
	err = f2fs_map_blocks(inode, &map, 1, flag);
	if (map.m_len > 0 && err == -ENOSPC) {
		if (!direct_io)
			set_inode_flag(inode, FI_NO_PREALLOC);
		err = 0;
	}
	return err;
}
void __do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock)
{
	if (flag == F2FS_GET_BLOCK_PRE_AIO) {
		if (lock)
			down_read(&sbi->node_change);
		else
			up_read(&sbi->node_change);
	} else {
		if (lock)
			f2fs_lock_op(sbi);
		else
			f2fs_unlock_op(sbi);
	}
}
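/*
 * Editor's note (illustrative, not kernel source): F2FS_GET_BLOCK_PRE_AIO
 * only reserves NEW_ADDR slots, so a shared read lock on sbi->node_change
 * suffices; every other allocating flag can change the node layout and
 * therefore takes the heavier f2fs_lock_op()/f2fs_unlock_op() pair.
 */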
/*
 * f2fs_map_blocks() now supports readahead/bmap/rw direct_IO with the
 * f2fs_map_blocks structure.
 * If the original data blocks are allocated, give them to the blockdev.
 * Otherwise,
 *     a. preallocate the requested block addresses
 *     b. do not use the extent cache, for better performance
 *     c. give the block addresses to the blockdev
 */
int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
						int create, int flag)
{
	unsigned int maxblocks = map->m_len;
	struct dnode_of_data dn;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int mode = map->m_may_create ? ALLOC_NODE : LOOKUP_NODE;
	pgoff_t pgofs, end_offset, end;
	int err = 0, ofs = 1;
	unsigned int ofs_in_node, last_ofs_in_node;
	blkcnt_t prealloc;
	struct extent_info ei = {0, 0, 0};
	block_t blkaddr;
	unsigned int start_pgofs;

	if (!maxblocks)
		return 0;

	map->m_len = 0;
	map->m_flags = 0;

	/* it only supports block size == page size */
	pgofs = (pgoff_t)map->m_lblk;
	end = pgofs + maxblocks;

	if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
		if (test_opt(sbi, LFS) && flag == F2FS_GET_BLOCK_DIO &&
							map->m_may_create)
			goto next_dnode;

		map->m_pblk = ei.blk + pgofs - ei.fofs;
		map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
		map->m_flags = F2FS_MAP_MAPPED;
		if (map->m_next_extent)
			*map->m_next_extent = pgofs + map->m_len;

		/* for hardware encryption, but to avoid potential issue in future */
		if (flag == F2FS_GET_BLOCK_DIO)
			f2fs_wait_on_block_writeback_range(inode,
						map->m_pblk, map->m_len);
		goto out;
	}

next_dnode:
	if (map->m_may_create)
		__do_map_lock(sbi, flag, true);

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, pgofs, mode);
	if (err) {
		if (flag == F2FS_GET_BLOCK_BMAP)
			map->m_pblk = 0;
		if (err == -ENOENT) {
			err = 0;
			if (map->m_next_pgofs)
				*map->m_next_pgofs =
					f2fs_get_next_page_offset(&dn, pgofs);
			if (map->m_next_extent)
				*map->m_next_extent =
					f2fs_get_next_page_offset(&dn, pgofs);
		}
		goto unlock_out;
	}

	start_pgofs = pgofs;
	prealloc = 0;
	last_ofs_in_node = ofs_in_node = dn.ofs_in_node;
	end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

next_block:
	blkaddr = datablock_addr(dn.inode, dn.node_page, dn.ofs_in_node);

	if (__is_valid_data_blkaddr(blkaddr) &&
		!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE)) {
		err = -EFSCORRUPTED;
		goto sync_out;
	}

	if (__is_valid_data_blkaddr(blkaddr)) {
		/* use out-of-place-update for direct IO under LFS mode */
		if (test_opt(sbi, LFS) && flag == F2FS_GET_BLOCK_DIO &&
							map->m_may_create) {
			err = __allocate_data_block(&dn, map->m_seg_type);
			if (err)
				goto sync_out;
			blkaddr = dn.data_blkaddr;
			set_inode_flag(inode, FI_APPEND_WRITE);
		}
	} else {
		if (create) {
			if (unlikely(f2fs_cp_error(sbi))) {
				err = -EIO;
				goto sync_out;
			}
			if (flag == F2FS_GET_BLOCK_PRE_AIO) {
				if (blkaddr == NULL_ADDR) {
					prealloc++;
					last_ofs_in_node = dn.ofs_in_node;
				}
			} else {
				WARN_ON(flag != F2FS_GET_BLOCK_PRE_DIO &&
					flag != F2FS_GET_BLOCK_DIO);
				err = __allocate_data_block(&dn,
							map->m_seg_type);
				if (!err)
					set_inode_flag(inode, FI_APPEND_WRITE);
			}
			if (err)
				goto sync_out;
			map->m_flags |= F2FS_MAP_NEW;
			blkaddr = dn.data_blkaddr;
		} else {
			if (flag == F2FS_GET_BLOCK_BMAP) {
				map->m_pblk = 0;
				goto sync_out;
			}
			if (flag == F2FS_GET_BLOCK_PRECACHE)
				goto sync_out;
			if (flag == F2FS_GET_BLOCK_FIEMAP &&
						blkaddr == NULL_ADDR) {
				if (map->m_next_pgofs)
					*map->m_next_pgofs = pgofs + 1;
				goto sync_out;
			}
			if (flag != F2FS_GET_BLOCK_FIEMAP) {
				/* for defragment case */
				if (map->m_next_pgofs)
					*map->m_next_pgofs = pgofs + 1;
				goto sync_out;
			}
		}
	}

	if (flag == F2FS_GET_BLOCK_PRE_AIO)
		goto skip;

	if (map->m_len == 0) {
		/* preallocated unwritten block should be mapped for fiemap. */
		if (blkaddr == NEW_ADDR)
			map->m_flags |= F2FS_MAP_UNWRITTEN;
		map->m_flags |= F2FS_MAP_MAPPED;

		map->m_pblk = blkaddr;
		map->m_len = 1;
	} else if ((map->m_pblk != NEW_ADDR &&
			blkaddr == (map->m_pblk + ofs)) ||
			(map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) ||
			flag == F2FS_GET_BLOCK_PRE_DIO) {
		ofs++;
		map->m_len++;
	} else {
		goto sync_out;
	}

skip:
	dn.ofs_in_node++;
	pgofs++;

	/* preallocate blocks in batch for one dnode page */
	if (flag == F2FS_GET_BLOCK_PRE_AIO &&
			(pgofs == end || dn.ofs_in_node == end_offset)) {

		dn.ofs_in_node = ofs_in_node;
		err = f2fs_reserve_new_blocks(&dn, prealloc);
		if (err)
			goto sync_out;

		map->m_len += dn.ofs_in_node - ofs_in_node;
		if (prealloc && dn.ofs_in_node != last_ofs_in_node + 1) {
			err = -ENOSPC;
			goto sync_out;
		}
		dn.ofs_in_node = end_offset;
	}

	if (pgofs >= end)
		goto sync_out;
	else if (dn.ofs_in_node < end_offset)
		goto next_block;

	if (flag == F2FS_GET_BLOCK_PRECACHE) {
		if (map->m_flags & F2FS_MAP_MAPPED) {
			unsigned int ofs = start_pgofs - map->m_lblk;

			f2fs_update_extent_cache_range(&dn,
				start_pgofs, map->m_pblk + ofs,
				map->m_len - ofs);
		}
	}

	f2fs_put_dnode(&dn);

	if (map->m_may_create) {
		__do_map_lock(sbi, flag, false);
		f2fs_balance_fs(sbi, dn.node_changed);
	}
	goto next_dnode;

sync_out:

	/* for hardware encryption, but to avoid potential issue in future */
	if (flag == F2FS_GET_BLOCK_DIO && map->m_flags & F2FS_MAP_MAPPED)
		f2fs_wait_on_block_writeback_range(inode,
						map->m_pblk, map->m_len);

	if (flag == F2FS_GET_BLOCK_PRECACHE) {
		if (map->m_flags & F2FS_MAP_MAPPED) {
			unsigned int ofs = start_pgofs - map->m_lblk;

			f2fs_update_extent_cache_range(&dn,
				start_pgofs, map->m_pblk + ofs,
				map->m_len - ofs);
		}
		if (map->m_next_extent)
			*map->m_next_extent = pgofs + 1;
	}
	f2fs_put_dnode(&dn);
unlock_out:
	if (map->m_may_create) {
		__do_map_lock(sbi, flag, false);
		f2fs_balance_fs(sbi, dn.node_changed);
	}
out:
	trace_f2fs_map_blocks(inode, map, err);
	return err;
}
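/*
 * Editor's sketch (not kernel source) of a typical non-allocating lookup,
 * in the style of the bmap/fiemap callers below; field names follow the
 * struct f2fs_map_blocks usage in this file:
 *
 *	struct f2fs_map_blocks map = { .m_lblk = lblk, .m_len = 1 };
 *
 *	map.m_next_pgofs = NULL;
 *	map.m_next_extent = NULL;
 *	map.m_seg_type = NO_CHECK_TYPE;
 *	map.m_may_create = false;
 *	if (!f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_BMAP) &&
 *	    (map.m_flags & F2FS_MAP_MAPPED))
 *		blkaddr = map.m_pblk;	// physical block for lblk
 */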
bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len)
{
	struct f2fs_map_blocks map;
	block_t last_lblk;
	int err;

	if (pos + len > i_size_read(inode))
		return false;

	map.m_lblk = F2FS_BYTES_TO_BLK(pos);
	map.m_next_pgofs = NULL;
	map.m_next_extent = NULL;
	map.m_seg_type = NO_CHECK_TYPE;
	map.m_may_create = false;
	last_lblk = F2FS_BLK_ALIGN(pos + len);

	while (map.m_lblk < last_lblk) {
		map.m_len = last_lblk - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
		if (err || map.m_len == 0)
			return false;
		map.m_lblk += map.m_len;
	}
	return true;
}
static int __get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh, int create, int flag,
			pgoff_t *next_pgofs, int seg_type, bool may_write)
{
	struct f2fs_map_blocks map;
	int err;

	map.m_lblk = iblock;
	map.m_len = bh->b_size >> inode->i_blkbits;
	map.m_next_pgofs = next_pgofs;
	map.m_next_extent = NULL;
	map.m_seg_type = seg_type;
	map.m_may_create = may_write;

	err = f2fs_map_blocks(inode, &map, create, flag);
	if (!err) {
		map_bh(bh, inode->i_sb, map.m_pblk);
		bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
		bh->b_size = (u64)map.m_len << inode->i_blkbits;
	}
	return err;
}
static int get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create, int flag,
			pgoff_t *next_pgofs)
{
	return __get_data_block(inode, iblock, bh_result, create,
							flag, next_pgofs,
							NO_CHECK_TYPE, create);
}
static int get_data_block_dio_write(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	return __get_data_block(inode, iblock, bh_result, create,
				F2FS_GET_BLOCK_DIO, NULL,
				f2fs_rw_hint_to_seg_type(inode->i_write_hint),
				true);
}
static int get_data_block_dio(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	return __get_data_block(inode, iblock, bh_result, create,
				F2FS_GET_BLOCK_DIO, NULL,
				f2fs_rw_hint_to_seg_type(inode->i_write_hint),
				false);
}
static int get_data_block_bmap(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	/* Block number less than F2FS MAX BLOCKS */
	if (unlikely(iblock >= F2FS_I_SB(inode)->max_file_blocks))
		return -EFBIG;

	return __get_data_block(inode, iblock, bh_result, create,
						F2FS_GET_BLOCK_BMAP, NULL,
						NO_CHECK_TYPE, create);
}
static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
{
	return (offset >> inode->i_blkbits);
}

static inline loff_t blk_to_logical(struct inode *inode, sector_t blk)
{
	return (blk << inode->i_blkbits);
}
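/*
 * Editor's note (illustrative, not kernel source): with the usual 4 KiB
 * f2fs block (i_blkbits == 12), logical_to_blk(inode, 8192) == 2 and
 * blk_to_logical(inode, 2) == 8192; the fiemap code below uses this pair
 * to round byte ranges to block boundaries.
 */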
static int f2fs_xattr_fiemap(struct inode *inode,
				struct fiemap_extent_info *fieinfo)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page;
	struct node_info ni;
	__u64 phys = 0, len;
	__u32 flags;
	nid_t xnid = F2FS_I(inode)->i_xattr_nid;
	int err = 0;

	if (f2fs_has_inline_xattr(inode)) {
		int offset;

		page = f2fs_grab_cache_page(NODE_MAPPING(sbi),
						inode->i_ino, false);
		if (!page)
			return -ENOMEM;

		err = f2fs_get_node_info(sbi, inode->i_ino, &ni);
		if (err) {
			f2fs_put_page(page, 1);
			return err;
		}

		phys = (__u64)blk_to_logical(inode, ni.blk_addr);
		offset = offsetof(struct f2fs_inode, i_addr) +
					sizeof(__le32) * (DEF_ADDRS_PER_INODE -
					get_inline_xattr_addrs(inode));

		phys += offset;
		len = inline_xattr_size(inode);

		f2fs_put_page(page, 1);

		flags = FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_NOT_ALIGNED;

		if (!xnid)
			flags |= FIEMAP_EXTENT_LAST;

		err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags);
		if (err || err == 1)
			return err;
	}

	if (xnid) {
		page = f2fs_grab_cache_page(NODE_MAPPING(sbi), xnid, false);
		if (!page)
			return -ENOMEM;

		err = f2fs_get_node_info(sbi, xnid, &ni);
		if (err) {
			f2fs_put_page(page, 1);
			return err;
		}

		phys = (__u64)blk_to_logical(inode, ni.blk_addr);
		len = inode->i_sb->s_blocksize;

		f2fs_put_page(page, 1);

		flags = FIEMAP_EXTENT_LAST;
	}

	if (phys)
		err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags);

	return (err < 0 ? err : 0);
}
int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len)
{
	struct buffer_head map_bh;
	sector_t start_blk, last_blk;
	pgoff_t next_pgofs;
	u64 logical = 0, phys = 0, size = 0;
	u32 flags = 0;
	int ret = 0;

	if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
		ret = f2fs_precache_extents(inode);
		if (ret)
			return ret;
	}

	ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC | FIEMAP_FLAG_XATTR);
	if (ret)
		return ret;

	inode_lock(inode);

	if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
		ret = f2fs_xattr_fiemap(inode, fieinfo);
		goto out;
	}

	if (f2fs_has_inline_data(inode)) {
		ret = f2fs_inline_data_fiemap(inode, fieinfo, start, len);
		if (ret != -EAGAIN)
			goto out;
	}

	if (logical_to_blk(inode, len) == 0)
		len = blk_to_logical(inode, 1);

	start_blk = logical_to_blk(inode, start);
	last_blk = logical_to_blk(inode, start + len - 1);

next:
	memset(&map_bh, 0, sizeof(struct buffer_head));
	map_bh.b_size = len;

	ret = get_data_block(inode, start_blk, &map_bh, 0,
					F2FS_GET_BLOCK_FIEMAP, &next_pgofs);
	if (ret)
		goto out;

	/* HOLE */
	if (!buffer_mapped(&map_bh)) {
		start_blk = next_pgofs;

		if (blk_to_logical(inode, start_blk) < blk_to_logical(inode,
					F2FS_I_SB(inode)->max_file_blocks))
			goto prep_next;

		flags |= FIEMAP_EXTENT_LAST;
	}

	if (size) {
		if (IS_ENCRYPTED(inode))
			flags |= FIEMAP_EXTENT_DATA_ENCRYPTED;

		ret = fiemap_fill_next_extent(fieinfo, logical,
				phys, size, flags);
	}

	if (start_blk > last_blk || ret)
		goto out;

	logical = blk_to_logical(inode, start_blk);
	phys = blk_to_logical(inode, map_bh.b_blocknr);
	size = map_bh.b_size;
	flags = 0;
	if (buffer_unwritten(&map_bh))
		flags = FIEMAP_EXTENT_UNWRITTEN;

	start_blk += logical_to_blk(inode, size);

prep_next:
	cond_resched();
	if (fatal_signal_pending(current))
		ret = -EINTR;
	else
		goto next;
out:
	if (ret == 1)
		ret = 0;

	inode_unlock(inode);
	return ret;
}
static int f2fs_read_single_page(struct inode *inode, struct page *page,
					unsigned nr_pages,
					struct f2fs_map_blocks *map,
					struct bio **bio_ret,
					sector_t *last_block_in_bio,
					bool is_readahead)
{
	struct bio *bio = *bio_ret;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t block_nr;
	int ret = 0;

	block_in_file = (sector_t)page_index(page);
	last_block = block_in_file + nr_pages;
	last_block_in_file = (i_size_read(inode) + blocksize - 1) >>
							blkbits;
	if (last_block > last_block_in_file)
		last_block = last_block_in_file;

	/* just zeroing out page which is beyond EOF */
	if (block_in_file >= last_block)
		goto zero_out;
	/*
	 * Map blocks using the previous result first.
	 */
	if ((map->m_flags & F2FS_MAP_MAPPED) &&
			block_in_file > map->m_lblk &&
			block_in_file < (map->m_lblk + map->m_len))
		goto got_it;

	/*
	 * Then do more f2fs_map_blocks() calls until we are
	 * done with this page.
	 */
	map->m_lblk = block_in_file;
	map->m_len = last_block - block_in_file;

	ret = f2fs_map_blocks(inode, map, 0, F2FS_GET_BLOCK_DEFAULT);
	if (ret)
		goto out;
got_it:
	if ((map->m_flags & F2FS_MAP_MAPPED)) {
		block_nr = map->m_pblk + block_in_file - map->m_lblk;
		SetPageMappedToDisk(page);

		if (!PageUptodate(page) && (!PageSwapCache(page) &&
					!cleancache_get_page(page))) {
			SetPageUptodate(page);
			goto confused;
		}

		if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), block_nr,
						DATA_GENERIC_ENHANCE_READ)) {
			ret = -EFSCORRUPTED;
			goto out;
		}
	} else {
zero_out:
		zero_user_segment(page, 0, PAGE_SIZE);
		if (!PageUptodate(page))
			SetPageUptodate(page);
		unlock_page(page);
		goto out;
	}

	/*
	 * This page will go to BIO. Do we need to send this
	 * BIO off first?
	 */
	if (bio && (*last_block_in_bio != block_nr - 1 ||
		!__same_bdev(F2FS_I_SB(inode), block_nr, bio))) {
submit_and_realloc:
		__submit_bio(F2FS_I_SB(inode), bio, DATA);
		bio = NULL;
	}
	if (bio == NULL) {
		bio = f2fs_grab_read_bio(inode, block_nr, nr_pages,
				is_readahead ? REQ_RAHEAD : 0);
		if (IS_ERR(bio)) {
			ret = PTR_ERR(bio);
			bio = NULL;
			goto out;
		}
	}

	/*
	 * If the page is under writeback, we need to wait for
	 * its completion to see the correct decrypted data.
	 */
	f2fs_wait_on_block_writeback(inode, block_nr);

	if (bio_add_page(bio, page, blocksize, 0) < blocksize)
		goto submit_and_realloc;

	inc_page_count(F2FS_I_SB(inode), F2FS_RD_DATA);
	ClearPageError(page);
	*last_block_in_bio = block_nr;
	goto out;
confused:
	if (bio) {
		__submit_bio(F2FS_I_SB(inode), bio, DATA);
		bio = NULL;
	}
	unlock_page(page);
out:
	*bio_ret = bio;
	return ret;
}
/*
 * This function was originally taken from fs/mpage.c, and customized for f2fs.
 * The major change was from block_size == page_size in f2fs by default.
 *
 * Note that the aops->readpages() function is ONLY used for read-ahead. If
 * this function ever deviates from doing just read-ahead, it should either
 * use ->readpage() or do the necessary surgery to decouple ->readpages()
 * from read-ahead.
 */
static int f2fs_mpage_readpages(struct address_space *mapping,
			struct list_head *pages, struct page *page,
			unsigned nr_pages, bool is_readahead)
{
	struct bio *bio = NULL;
	sector_t last_block_in_bio = 0;
	struct inode *inode = mapping->host;
	struct f2fs_map_blocks map;
	int ret = 0;

	map.m_pblk = 0;
	map.m_lblk = 0;
	map.m_len = 0;
	map.m_flags = 0;
	map.m_next_pgofs = NULL;
	map.m_next_extent = NULL;
	map.m_seg_type = NO_CHECK_TYPE;
	map.m_may_create = false;

	for (; nr_pages; nr_pages--) {
		if (pages) {
			page = list_last_entry(pages, struct page, lru);

			prefetchw(&page->flags);
			list_del(&page->lru);
			if (add_to_page_cache_lru(page, mapping,
						  page_index(page),
						  readahead_gfp_mask(mapping)))
				goto next_page;
		}

		ret = f2fs_read_single_page(inode, page, nr_pages, &map, &bio,
					&last_block_in_bio, is_readahead);
		if (ret) {
			SetPageError(page);
			zero_user_segment(page, 0, PAGE_SIZE);
			unlock_page(page);
		}
next_page:
		if (pages)
			put_page(page);
	}
	BUG_ON(pages && !list_empty(pages));
	if (bio)
		__submit_bio(F2FS_I_SB(inode), bio, DATA);
	return pages ? 0 : ret;
}
static int f2fs_read_data_page(struct file *file, struct page *page)
{
	struct inode *inode = page_file_mapping(page)->host;
	int ret = -EAGAIN;

	trace_f2fs_readpage(page, DATA);

	/* If the file has inline data, try to read it directly */
	if (f2fs_has_inline_data(inode))
		ret = f2fs_read_inline_data(inode, page);
	if (ret == -EAGAIN)
		ret = f2fs_mpage_readpages(page_file_mapping(page),
						NULL, page, 1, false);
	return ret;
}
static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = mapping->host;
	struct page *page = list_last_entry(pages, struct page, lru);

	trace_f2fs_readpages(inode, page, nr_pages);

	/* If the file has inline data, skip readpages */
	if (f2fs_has_inline_data(inode))
		return 0;

	return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages, true);
}
static int encrypt_one_page(struct f2fs_io_info *fio)
{
	struct inode *inode = fio->page->mapping->host;
	struct page *mpage;
	gfp_t gfp_flags = GFP_NOFS;

	if (!f2fs_encrypted_file(inode))
		return 0;

	/* wait for GCed page writeback via META_MAPPING */
	f2fs_wait_on_block_writeback(inode, fio->old_blkaddr);

retry_encrypt:
	fio->encrypted_page = fscrypt_encrypt_pagecache_blocks(fio->page,
							       PAGE_SIZE, 0,
							       gfp_flags);
	if (IS_ERR(fio->encrypted_page)) {
		/* flush pending IOs and wait for a while in the ENOMEM case */
		if (PTR_ERR(fio->encrypted_page) == -ENOMEM) {
			f2fs_flush_merged_writes(fio->sbi);
			congestion_wait(BLK_RW_ASYNC, HZ/50);
			gfp_flags |= __GFP_NOFAIL;
			goto retry_encrypt;
		}
		return PTR_ERR(fio->encrypted_page);
	}

	mpage = find_lock_page(META_MAPPING(fio->sbi), fio->old_blkaddr);
	if (mpage) {
		if (PageUptodate(mpage))
			memcpy(page_address(mpage),
				page_address(fio->encrypted_page), PAGE_SIZE);
		f2fs_put_page(mpage, 1);
	}
	return 0;
}
static inline bool check_inplace_update_policy(struct inode *inode,
				struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	unsigned int policy = SM_I(sbi)->ipu_policy;

	if (policy & (0x1 << F2FS_IPU_FORCE))
		return true;
	if (policy & (0x1 << F2FS_IPU_SSR) && f2fs_need_SSR(sbi))
		return true;
	if (policy & (0x1 << F2FS_IPU_UTIL) &&
			utilization(sbi) > SM_I(sbi)->min_ipu_util)
		return true;
	if (policy & (0x1 << F2FS_IPU_SSR_UTIL) && f2fs_need_SSR(sbi) &&
			utilization(sbi) > SM_I(sbi)->min_ipu_util)
		return true;

	/*
	 * IPU for rewriting async pages
	 */
	if (policy & (0x1 << F2FS_IPU_ASYNC) &&
			fio && fio->op == REQ_OP_WRITE &&
			!(fio->op_flags & REQ_SYNC) &&
			!IS_ENCRYPTED(inode))
		return true;

	/* this is only set during fdatasync */
	if (policy & (0x1 << F2FS_IPU_FSYNC) &&
			is_inode_flag_set(inode, FI_NEED_IPU))
		return true;

	if (unlikely(fio && is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
			!f2fs_is_checkpointed_data(sbi, fio->old_blkaddr)))
		return true;

	return false;
}
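/*
 * Editor's note (illustrative, not kernel source): ipu_policy is a bitmask,
 * so e.g. a policy of (1 << F2FS_IPU_SSR) | (1 << F2FS_IPU_FSYNC) permits
 * in-place updates only when SSR allocation is needed or during fdatasync;
 * F2FS_IPU_FORCE short-circuits every other test above.
 */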
bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio)
{
	if (f2fs_is_pinned_file(inode))
		return true;

	/* if this is a cold file, we should overwrite to avoid fragmentation */
	if (file_is_cold(inode))
		return true;

	return check_inplace_update_policy(inode, fio);
}
bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (test_opt(sbi, LFS))
		return true;
	if (S_ISDIR(inode->i_mode))
		return true;
	if (IS_NOQUOTA(inode))
		return true;
	if (f2fs_is_atomic_file(inode))
		return true;
	if (fio) {
		if (is_cold_data(fio->page))
			return true;
		if (IS_ATOMIC_WRITTEN_PAGE(fio->page))
			return true;
		if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
			f2fs_is_checkpointed_data(sbi, fio->old_blkaddr)))
			return true;
	}
	return false;
}
static inline bool need_inplace_update(struct f2fs_io_info *fio)
{
	struct inode *inode = fio->page->mapping->host;

	if (f2fs_should_update_outplace(inode, fio))
		return false;

	return f2fs_should_update_inplace(inode, fio);
}
int f2fs_do_write_data_page(struct f2fs_io_info *fio)
{
	struct page *page = fio->page;
	struct inode *inode = page->mapping->host;
	struct dnode_of_data dn;
	struct extent_info ei = {0, 0, 0};
	struct node_info ni;
	bool ipu_force = false;
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	if (need_inplace_update(fio) &&
			f2fs_lookup_extent_cache(inode, page->index, &ei)) {
		fio->old_blkaddr = ei.blk + page->index - ei.fofs;

		if (!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
						DATA_GENERIC_ENHANCE))
			return -EFSCORRUPTED;

		ipu_force = true;
		fio->need_lock = LOCK_DONE;
		goto got_it;
	}

	/* avoid deadlock between page->lock and f2fs_lock_op() */
	if (fio->need_lock == LOCK_REQ && !f2fs_trylock_op(fio->sbi))
		return -EAGAIN;

	err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
	if (err)
		goto out;

	fio->old_blkaddr = dn.data_blkaddr;

	/* This page is already truncated */
	if (fio->old_blkaddr == NULL_ADDR) {
		ClearPageUptodate(page);
		clear_cold_data(page);
		goto out_writepage;
	}
got_it:
	if (__is_valid_data_blkaddr(fio->old_blkaddr) &&
		!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
						DATA_GENERIC_ENHANCE)) {
		err = -EFSCORRUPTED;
		goto out_writepage;
	}
	/*
	 * If the current allocation needs SSR,
	 * it is better to write updated data in place.
	 */
	if (ipu_force ||
		(__is_valid_data_blkaddr(fio->old_blkaddr) &&
					need_inplace_update(fio))) {
		err = encrypt_one_page(fio);
		if (err)
			goto out_writepage;

		set_page_writeback(page);
		ClearPageError(page);
		f2fs_put_dnode(&dn);
		if (fio->need_lock == LOCK_REQ)
			f2fs_unlock_op(fio->sbi);
		err = f2fs_inplace_write_data(fio);
		if (err) {
			if (f2fs_encrypted_file(inode))
				fscrypt_finalize_bounce_page(&fio->encrypted_page);
			if (PageWriteback(page))
				end_page_writeback(page);
		} else {
			set_inode_flag(inode, FI_UPDATE_WRITE);
		}
		trace_f2fs_do_write_data_page(fio->page, IPU);
		return err;
	}

	if (fio->need_lock == LOCK_RETRY) {
		if (!f2fs_trylock_op(fio->sbi)) {
			err = -EAGAIN;
			goto out_writepage;
		}
		fio->need_lock = LOCK_REQ;
	}

	err = f2fs_get_node_info(fio->sbi, dn.nid, &ni);
	if (err)
		goto out_writepage;

	fio->version = ni.version;

	err = encrypt_one_page(fio);
	if (err)
		goto out_writepage;

	set_page_writeback(page);
	ClearPageError(page);

	/* LFS mode write path */
	f2fs_outplace_write_data(&dn, fio);
	trace_f2fs_do_write_data_page(page, OPU);
	set_inode_flag(inode, FI_APPEND_WRITE);
	if (page->index == 0)
		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
out_writepage:
	f2fs_put_dnode(&dn);
out:
	if (fio->need_lock == LOCK_REQ)
		f2fs_unlock_op(fio->sbi);
	return err;
}
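/*
 * Editor's note (illustrative, not kernel source): the function above picks
 * between the two f2fs write paths. IPU (in-place update) rewrites the old
 * block and needs no node update, which suits fsync-heavy hot data; OPU
 * (out-of-place update) allocates a new block LFS-style and sets
 * FI_APPEND_WRITE so fsync knows node blocks must also reach disk.
 */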
static int __write_data_page(struct page *page, bool *submitted,
				struct bio **bio,
				sector_t *last_block,
				struct writeback_control *wbc,
				enum iostat_type io_type)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long) i_size)
							>> PAGE_SHIFT;
	loff_t psize = (page->index + 1) << PAGE_SHIFT;
	unsigned offset = 0;
	bool need_balance_fs = false;
	int err = 0;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = inode->i_ino,
		.type = DATA,
		.op = REQ_OP_WRITE,
		.op_flags = wbc_to_write_flags(wbc),
		.old_blkaddr = NULL_ADDR,
		.page = page,
		.encrypted_page = NULL,
		.submitted = false,
		.need_lock = LOCK_RETRY,
		.io_type = io_type,
		.io_wbc = wbc,
		.bio = bio,
		.last_block = last_block,
	};

	trace_f2fs_writepage(page, DATA);

	/* we should bypass data pages to proceed with the kworker jobs */
	if (unlikely(f2fs_cp_error(sbi))) {
		mapping_set_error(page->mapping, -EIO);
		/*
		 * don't drop any dirty dentry pages, to keep the latest
		 * directory structure.
		 */
		if (S_ISDIR(inode->i_mode))
			goto redirty_out;
		goto out;
	}

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;

	if (page->index < end_index)
		goto write;

	/*
	 * If the offset is out of range of the file size,
	 * this page does not have to be written to disk.
	 */
	offset = i_size & (PAGE_SIZE - 1);
	if ((page->index >= end_index + 1) || !offset)
		goto out;

	zero_user_segment(page, offset, PAGE_SIZE);
write:
	if (f2fs_is_drop_cache(inode))
		goto out;
	/* we should not write the 0'th page having a journal header */
	if (f2fs_is_volatile_file(inode) && (!page->index ||
			(!wbc->for_reclaim &&
			f2fs_available_free_memory(sbi, BASE_CHECK))))
		goto redirty_out;

	/* Dentry blocks are controlled by checkpoint */
	if (S_ISDIR(inode->i_mode)) {
		fio.need_lock = LOCK_DONE;
		err = f2fs_do_write_data_page(&fio);
		goto done;
	}

	if (!wbc->for_reclaim)
		need_balance_fs = true;
	else if (has_not_enough_free_secs(sbi, 0, 0))
		goto redirty_out;
	else
		set_inode_flag(inode, FI_HOT_DATA);

	err = -EAGAIN;
	if (f2fs_has_inline_data(inode)) {
		err = f2fs_write_inline_data(inode, page);
		if (!err)
			goto out;
	}

	if (err == -EAGAIN) {
		err = f2fs_do_write_data_page(&fio);
		if (err == -EAGAIN) {
			fio.need_lock = LOCK_REQ;
			err = f2fs_do_write_data_page(&fio);
		}
	}

	if (err) {
		file_set_keep_isize(inode);
	} else {
		down_write(&F2FS_I(inode)->i_sem);
		if (F2FS_I(inode)->last_disk_size < psize)
			F2FS_I(inode)->last_disk_size = psize;
		up_write(&F2FS_I(inode)->i_sem);
	}

done:
	if (err && err != -ENOENT)
		goto redirty_out;

out:
	inode_dec_dirty_pages(inode);
	if (err) {
		ClearPageUptodate(page);
		clear_cold_data(page);
	}

	if (wbc->for_reclaim) {
		f2fs_submit_merged_write_cond(sbi, NULL, page, 0, DATA);
		clear_inode_flag(inode, FI_HOT_DATA);
		f2fs_remove_dirty_inode(inode);
		submitted = NULL;
	}

	unlock_page(page);
	if (!S_ISDIR(inode->i_mode) && !IS_NOQUOTA(inode) &&
					!F2FS_I(inode)->cp_task) {
		f2fs_submit_ipu_bio(sbi, bio, page);
		f2fs_balance_fs(sbi, need_balance_fs);
	}

	if (unlikely(f2fs_cp_error(sbi))) {
		f2fs_submit_ipu_bio(sbi, bio, page);
		f2fs_submit_merged_write(sbi, DATA);
	}

	if (submitted)
		*submitted = fio.submitted;

	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	/*
	 * pageout() in MM translates EAGAIN, so calls handle_write_error()
	 * -> mapping_set_error() -> set_bit(AS_EIO, ...).
	 * file_write_and_wait_range() will see the EIO error, which is
	 * critical to the return value of fsync() followed by an
	 * atomic_write failure reported to the user.
	 */
	if (!err || wbc->for_reclaim)
		return AOP_WRITEPAGE_ACTIVATE;
	unlock_page(page);
	return err;
}
static int f2fs_write_data_page(struct page *page,
				struct writeback_control *wbc)
{
	return __write_data_page(page, NULL, NULL, NULL, wbc, FS_DATA_IO);
}
/*
 * This function was copied from write_cache_pages in mm/page-writeback.c.
 * The major change is making the write step of cold data pages separate
 * from warm/hot data pages.
 */
static int f2fs_write_cache_pages(struct address_space *mapping,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
	struct bio *bio = NULL;
	sector_t last_block;
	int nr_pages;
	pgoff_t uninitialized_var(writeback_index);
	pgoff_t index;
	pgoff_t end;		/* Inclusive */
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	xa_mark_t tag;
	int nwritten = 0;

	pagevec_init(&pvec);

	if (get_dirty_pages(mapping->host) <=
				SM_I(F2FS_M_SB(mapping))->min_hot_blocks)
		set_inode_flag(mapping->host, FI_HOT_DATA);
	else
		clear_inode_flag(mapping->host, FI_HOT_DATA);

	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;
retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		int i;

		nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
				tag);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			bool submitted = false;

			/* give a priority to WB_SYNC threads */
			if (atomic_read(&sbi->wb_sync_req[DATA]) &&
					wbc->sync_mode == WB_SYNC_NONE) {
				done = 1;
				break;
			}

			done_index = page->index;
retry_write:
			lock_page(page);

			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (PageWriteback(page)) {
				if (wbc->sync_mode != WB_SYNC_NONE) {
					f2fs_wait_on_page_writeback(page,
							DATA, true, true);
					f2fs_submit_ipu_bio(sbi, &bio, page);
				} else {
					goto continue_unlock;
				}
			}

			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			ret = __write_data_page(page, &submitted, &bio,
					&last_block, wbc, io_type);
			if (unlikely(ret)) {
				/*
				 * keep nr_to_write, since vfs uses this to
				 * get # of written pages.
				 */
				if (ret == AOP_WRITEPAGE_ACTIVATE) {
					unlock_page(page);
					ret = 0;
					continue;
				} else if (ret == -EAGAIN) {
					ret = 0;
					if (wbc->sync_mode == WB_SYNC_ALL) {
						cond_resched();
						congestion_wait(BLK_RW_ASYNC,
									HZ/50);
						goto retry_write;
					}
					continue;
				}
				done_index = page->index + 1;
				done = 1;
				break;
			} else if (submitted) {
				nwritten++;
			}

			if (--wbc->nr_to_write <= 0 &&
					wbc->sync_mode == WB_SYNC_NONE) {
				done = 1;
				break;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (!cycled && !done) {
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	if (nwritten)
		f2fs_submit_merged_write_cond(F2FS_M_SB(mapping), mapping->host,
								NULL, 0, DATA);
	/* submit cached bio of IPU write */
	if (bio)
		__submit_bio(sbi, bio, DATA);

	return ret;
}
static inline bool __should_serialize_io(struct inode *inode,
					struct writeback_control *wbc)
{
	if (!S_ISREG(inode->i_mode))
		return false;
	if (IS_NOQUOTA(inode))
		return false;
	/* to avoid deadlock in the path of data flush */
	if (F2FS_I(inode)->cp_task)
		return false;
	if (wbc->sync_mode != WB_SYNC_ALL)
		return true;
	if (get_dirty_pages(inode) >= SM_I(F2FS_I_SB(inode))->min_seq_blocks)
		return true;
	return false;
}
static int __f2fs_write_data_pages(struct address_space *mapping,
						struct writeback_control *wbc,
						enum iostat_type io_type)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct blk_plug plug;
	int ret;
	bool locked = false;

	/* deal with chardevs and other special files */
	if (!mapping->a_ops->writepage)
		return 0;

	/* skip writing if there is no dirty page in this inode */
	if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
		return 0;

	/* during POR, we don't need to trigger writepage at all. */
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto skip_write;

	if ((S_ISDIR(inode->i_mode) || IS_NOQUOTA(inode)) &&
			wbc->sync_mode == WB_SYNC_NONE &&
			get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
			f2fs_available_free_memory(sbi, DIRTY_DENTS))
		goto skip_write;

	/* skip writing during file defragment */
	if (is_inode_flag_set(inode, FI_DO_DEFRAG))
		goto skip_write;

	trace_f2fs_writepages(mapping->host, wbc, DATA);

	/* to avoid splitting IOs due to mixed WB_SYNC_ALL and WB_SYNC_NONE */
	if (wbc->sync_mode == WB_SYNC_ALL)
		atomic_inc(&sbi->wb_sync_req[DATA]);
	else if (atomic_read(&sbi->wb_sync_req[DATA]))
		goto skip_write;

	if (__should_serialize_io(inode, wbc)) {
		mutex_lock(&sbi->writepages);
		locked = true;
	}

	blk_start_plug(&plug);
	ret = f2fs_write_cache_pages(mapping, wbc, io_type);
	blk_finish_plug(&plug);

	if (locked)
		mutex_unlock(&sbi->writepages);

	if (wbc->sync_mode == WB_SYNC_ALL)
		atomic_dec(&sbi->wb_sync_req[DATA]);
	/*
	 * if some pages were truncated, we cannot guarantee its mapping->host
	 * will detect pending bios.
	 */

	f2fs_remove_dirty_inode(inode);
	return ret;

skip_write:
	wbc->pages_skipped += get_dirty_pages(inode);
	trace_f2fs_writepages(mapping->host, wbc, DATA);
	return 0;
}
static int f2fs_write_data_pages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;

	return __f2fs_write_data_pages(mapping, wbc,
			F2FS_I(inode)->cp_task == current ?
			FS_CP_DATA_IO : FS_DATA_IO);
}
static void f2fs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;
	loff_t i_size = i_size_read(inode);

	if (to > i_size) {
		down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		down_write(&F2FS_I(inode)->i_mmap_sem);

		truncate_pagecache(inode, i_size);
		if (!IS_NOQUOTA(inode))
			f2fs_truncate_blocks(inode, i_size, true);

		up_write(&F2FS_I(inode)->i_mmap_sem);
		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	}
}
static int prepare_write_begin(struct f2fs_sb_info *sbi,
			struct page *page, loff_t pos, unsigned len,
			block_t *blk_addr, bool *node_changed)
{
	struct inode *inode = page->mapping->host;
	pgoff_t index = page->index;
	struct dnode_of_data dn;
	struct page *ipage;
	bool locked = false;
	struct extent_info ei = {0, 0, 0};
	int err = 0;
	int flag;

	/*
	 * We already allocated all the blocks, so we don't need to look up
	 * the block addresses when there is no need to fill the page.
	 */
	if (!f2fs_has_inline_data(inode) && len == PAGE_SIZE &&
			!is_inode_flag_set(inode, FI_NO_PREALLOC))
		return 0;

	/* f2fs_lock_op avoids race between write CP and convert_inline_page */
	if (f2fs_has_inline_data(inode) && pos + len > MAX_INLINE_DATA(inode))
		flag = F2FS_GET_BLOCK_DEFAULT;
	else
		flag = F2FS_GET_BLOCK_PRE_AIO;

	if (f2fs_has_inline_data(inode) ||
			(pos & PAGE_MASK) >= i_size_read(inode)) {
		__do_map_lock(sbi, flag, true);
		locked = true;
	}
restart:
	/* check inline_data */
	ipage = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto unlock_out;
	}

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_has_inline_data(inode)) {
		if (pos + len <= MAX_INLINE_DATA(inode)) {
			f2fs_do_read_inline_data(page, ipage);
			set_inode_flag(inode, FI_DATA_EXIST);
			if (inode->i_nlink)
				set_inline_node(ipage);
		} else {
			err = f2fs_convert_inline_page(&dn, page);
			if (err)
				goto out;
			if (dn.data_blkaddr == NULL_ADDR)
				err = f2fs_get_block(&dn, index);
		}
	} else if (locked) {
		err = f2fs_get_block(&dn, index);
	} else {
		if (f2fs_lookup_extent_cache(inode, index, &ei)) {
			dn.data_blkaddr = ei.blk + index - ei.fofs;
		} else {
			/* hole case */
			err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
			if (err || dn.data_blkaddr == NULL_ADDR) {
				f2fs_put_dnode(&dn);
				__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO,
								true);
				WARN_ON(flag != F2FS_GET_BLOCK_PRE_AIO);
				locked = true;
				goto restart;
			}
		}
	}

	/* convert_inline_page can make node_changed */
	*blk_addr = dn.data_blkaddr;
	*node_changed = dn.node_changed;
out:
	f2fs_put_dnode(&dn);
unlock_out:
	if (locked)
		__do_map_lock(sbi, flag, false);
	return err;
}
static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page = NULL;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_SHIFT;
	bool need_balance = false, drop_atomic = false;
	block_t blkaddr = NULL_ADDR;
	int err = 0;

	trace_f2fs_write_begin(inode, pos, len, flags);

	err = f2fs_is_checkpoint_ready(sbi);
	if (err)
		goto fail;

	if ((f2fs_is_atomic_file(inode) &&
			!f2fs_available_free_memory(sbi, INMEM_PAGES)) ||
			is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
		err = -ENOMEM;
		drop_atomic = true;
		goto fail;
	}

	/*
	 * We should check this now to avoid deadlock on the inode page
	 * and page #0. The locking rule for inline_data conversion is:
	 * lock_page(page #0) -> lock_page(inode_page)
	 */
	if (index != 0) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			goto fail;
	}
repeat:
	/*
	 * Do not use grab_cache_page_write_begin() here, to avoid deadlock
	 * in wait_for_stable_page. We wait for writeback below, under our
	 * own IO control.
	 */
	page = f2fs_pagecache_get_page(mapping, index,
				FGP_LOCK | FGP_WRITE | FGP_CREAT, GFP_NOFS);
	if (!page) {
		err = -ENOMEM;
		goto fail;
	}

	*pagep = page;

	err = prepare_write_begin(sbi, page, pos, len,
					&blkaddr, &need_balance);
	if (err)
		goto fail;

	if (need_balance && !IS_NOQUOTA(inode) &&
			has_not_enough_free_secs(sbi, 0, 0)) {
		unlock_page(page);
		f2fs_balance_fs(sbi, true);
		lock_page(page);
		if (page->mapping != mapping) {
			/* The page got truncated from under us */
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}

	f2fs_wait_on_page_writeback(page, DATA, false, true);

	if (len == PAGE_SIZE || PageUptodate(page))
		return 0;

	if (!(pos & (PAGE_SIZE - 1)) && (pos + len) >= i_size_read(inode)) {
		zero_user_segment(page, len, PAGE_SIZE);
		return 0;
	}

	if (blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
	} else {
		if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
				DATA_GENERIC_ENHANCE_READ)) {
			err = -EFSCORRUPTED;
			goto fail;
		}
		err = f2fs_submit_page_read(inode, page, blkaddr);
		if (err)
			goto fail;

		lock_page(page);
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
		if (unlikely(!PageUptodate(page))) {
			err = -EIO;
			goto fail;
		}
	}
	return 0;

fail:
	f2fs_put_page(page, 1);
	f2fs_write_failed(mapping, pos + len);
	if (drop_atomic)
		f2fs_drop_inmem_pages_all(sbi, false);
	return err;
}
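
/*
 * ->write_end(): commit the copied bytes, dirty the page, and extend
 * i_size if the write went past it.
 */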
static int f2fs_write_end(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	trace_f2fs_write_end(inode, pos, len, copied);

	/*
	 * This should come from len == PAGE_SIZE, so we expect copied to
	 * be PAGE_SIZE as well. Otherwise, treat it as zero bytes copied
	 * and let generic_perform_write() retry the copy via copied = 0.
	 */
	if (!PageUptodate(page)) {
		if (unlikely(copied != len))
			copied = 0;
		else
			SetPageUptodate(page);
	}
	if (!copied)
		goto unlock_out;

	set_page_dirty(page);

	if (pos + copied > i_size_read(inode))
		f2fs_i_size_write(inode, pos + copied);
unlock_out:
	f2fs_put_page(page, 1);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return copied;
}
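
/*
 * Check DIO alignment: returns 0 if the request is aligned to the fs
 * block size, 1 if it is only aligned to the device's logical block
 * size, and -EINVAL if it is not aligned at all.
 */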
static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
							loff_t offset)
{
	unsigned i_blkbits = READ_ONCE(inode->i_blkbits);
	unsigned blkbits = i_blkbits;
	unsigned blocksize_mask = (1 << blkbits) - 1;
	unsigned long align = offset | iov_iter_alignment(iter);
	struct block_device *bdev = inode->i_sb->s_bdev;

	if (align & blocksize_mask) {
		if (bdev)
			blkbits = blksize_bits(bdev_logical_block_size(bdev));
		blocksize_mask = (1 << blkbits) - 1;
		if (align & blocksize_mask)
			return -EINVAL;
		return 1;
	}
	return 0;
}
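
/*
 * Completion path for direct I/O bios: drop the in-flight DIO page
 * count, restore the original bi_end_io/bi_private saved in
 * f2fs_dio_submit_bio(), then complete the bio.
 */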
static void f2fs_dio_end_io(struct bio *bio)
{
	struct f2fs_private_dio *dio = bio->bi_private;

	dec_page_count(F2FS_I_SB(dio->inode),
			dio->write ? F2FS_DIO_WRITE : F2FS_DIO_READ);

	bio->bi_private = dio->orig_private;
	bio->bi_end_io = dio->orig_end_io;

	kvfree(dio);

	bio_endio(bio);
}
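
/*
 * Hook every DIO bio at submission time: wrap the completion callback
 * with f2fs_dio_end_io() so in-flight DIO pages can be counted per sbi.
 * If the wrapper context cannot be allocated, fail the bio immediately.
 */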
static void f2fs_dio_submit_bio(struct bio *bio, struct inode *inode,
							loff_t file_offset)
{
	struct f2fs_private_dio *dio;
	bool write = (bio_op(bio) == REQ_OP_WRITE);

	dio = f2fs_kzalloc(F2FS_I_SB(inode),
			sizeof(struct f2fs_private_dio), GFP_NOFS);
	if (!dio)
		goto out;

	dio->inode = inode;
	dio->orig_end_io = bio->bi_end_io;
	dio->orig_private = bio->bi_private;
	dio->write = write;

	bio->bi_end_io = f2fs_dio_end_io;
	bio->bi_private = dio;

	inc_page_count(F2FS_I_SB(inode),
			write ? F2FS_DIO_WRITE : F2FS_DIO_READ);

	submit_bio(bio);
	return;
out:
	bio->bi_status = BLK_STS_IOERR;
	bio_endio(bio);
}
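
/*
 * Direct I/O entry. Misaligned or otherwise unsupported requests return
 * 0 so the caller falls back to buffered I/O. i_gc_rwsem is taken for
 * reading to exclude GC (non-blockingly under IOCB_NOWAIT); out-of-place
 * updates (do_opu) additionally take the READ side.
 */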
static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	size_t count = iov_iter_count(iter);
	loff_t offset = iocb->ki_pos;
	int rw = iov_iter_rw(iter);
	int err;
	enum rw_hint hint = iocb->ki_hint;
	int whint_mode = F2FS_OPTION(sbi).whint_mode;
	bool do_opu;

	err = check_direct_IO(inode, iter, offset);
	if (err)
		return err < 0 ? err : 0;

	if (f2fs_force_buffered_io(inode, iocb, iter))
		return 0;

	do_opu = allow_outplace_dio(inode, iocb, iter);

	trace_f2fs_direct_IO_enter(inode, offset, count, rw);

	if (rw == WRITE && whint_mode == WHINT_MODE_OFF)
		iocb->ki_hint = WRITE_LIFE_NOT_SET;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!down_read_trylock(&fi->i_gc_rwsem[rw])) {
			iocb->ki_hint = hint;
			err = -EAGAIN;
			goto out;
		}
		if (do_opu && !down_read_trylock(&fi->i_gc_rwsem[READ])) {
			up_read(&fi->i_gc_rwsem[rw]);
			iocb->ki_hint = hint;
			err = -EAGAIN;
			goto out;
		}
	} else {
		down_read(&fi->i_gc_rwsem[rw]);
		if (do_opu)
			down_read(&fi->i_gc_rwsem[READ]);
	}

	err = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
			iter, rw == WRITE ? get_data_block_dio_write :
			get_data_block_dio, NULL, f2fs_dio_submit_bio,
			DIO_LOCKING | DIO_SKIP_HOLES);

	if (do_opu)
		up_read(&fi->i_gc_rwsem[READ]);

	up_read(&fi->i_gc_rwsem[rw]);

	if (rw == WRITE) {
		if (whint_mode == WHINT_MODE_OFF)
			iocb->ki_hint = hint;
		if (err > 0) {
			f2fs_update_iostat(F2FS_I_SB(inode), APP_DIRECT_IO,
									err);
			set_inode_flag(inode, FI_UPDATE_WRITE);
		} else if (err < 0) {
			f2fs_write_failed(mapping, offset + count);
		}
	}

out:
	trace_f2fs_direct_IO_exit(inode, offset, count, rw, err);

	return err;
}
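
/*
 * ->invalidatepage(): keep the dirty-page accounting consistent when a
 * page is torn down, and release its private state. Atomic-write pages
 * are handed back through f2fs_drop_inmem_page() instead.
 */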
void f2fs_invalidate_page(struct page *page, unsigned int offset,
							unsigned int length)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
		(offset % PAGE_SIZE || length != PAGE_SIZE))
		return;

	if (PageDirty(page)) {
		if (inode->i_ino == F2FS_META_INO(sbi)) {
			dec_page_count(sbi, F2FS_DIRTY_META);
		} else if (inode->i_ino == F2FS_NODE_INO(sbi)) {
			dec_page_count(sbi, F2FS_DIRTY_NODES);
		} else {
			inode_dec_dirty_pages(inode);
			f2fs_remove_dirty_inode(inode);
		}
	}

	clear_cold_data(page);

	if (IS_ATOMIC_WRITTEN_PAGE(page))
		return f2fs_drop_inmem_page(inode, page);

	f2fs_clear_page_private(page);
}
int f2fs_release_page(struct page *page, gfp_t wait)
{
	/* If this is a dirty page, keep PagePrivate */
	if (PageDirty(page))
		return 0;

	/* This is an atomic written page, keep Private */
	if (IS_ATOMIC_WRITTEN_PAGE(page))
		return 0;

	clear_cold_data(page);
	f2fs_clear_page_private(page);
	return 1;
}
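
/*
 * ->set_page_dirty(): pages of an in-progress atomic write are diverted
 * to the inmem list instead of the normal dirty-page accounting.
 */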
static int f2fs_set_data_page_dirty(struct page *page)
{
	struct inode *inode = page_file_mapping(page)->host;

	trace_f2fs_set_page_dirty(page, DATA);

	if (!PageUptodate(page))
		SetPageUptodate(page);
	if (PageSwapCache(page))
		return __set_page_dirty_nobuffers(page);

	if (f2fs_is_atomic_file(inode) && !f2fs_is_commit_atomic_write(inode)) {
		if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
			f2fs_register_inmem_page(inode, page);
			return 1;
		}
		/*
		 * This page has already been registered, so just
		 * return here.
		 */
		return 0;
	}

	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		f2fs_update_dirty_page(inode, page);
		return 1;
	}
	return 0;
}
static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;

	if (f2fs_has_inline_data(inode))
		return 0;

	/* make sure allocating whole blocks */
	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		filemap_write_and_wait(mapping);

	return generic_block_bmap(mapping, block, get_data_block_bmap);
}
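
/*
 * Page migration must preserve the extra state f2fs keeps per page:
 * the inmem list entry (and its extra reference) for atomic-write
 * pages, and page_private().
 */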
#ifdef CONFIG_MIGRATION
#include <linux/migrate.h>

int f2fs_migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page, enum migrate_mode mode)
{
	int rc, extra_count;
	struct f2fs_inode_info *fi = F2FS_I(mapping->host);
	bool atomic_written = IS_ATOMIC_WRITTEN_PAGE(page);

	BUG_ON(PageWriteback(page));

	/* migrating an atomic written page is safe with the inmem_lock held */
	if (atomic_written) {
		if (mode != MIGRATE_SYNC)
			return -EBUSY;
		if (!mutex_trylock(&fi->inmem_lock))
			return -EAGAIN;
	}

	/* one extra reference was held for atomic_write page */
	extra_count = atomic_written ? 1 : 0;
	rc = migrate_page_move_mapping(mapping, newpage,
				page, extra_count);
	if (rc != MIGRATEPAGE_SUCCESS) {
		if (atomic_written)
			mutex_unlock(&fi->inmem_lock);
		return rc;
	}

	if (atomic_written) {
		struct inmem_pages *cur;
		list_for_each_entry(cur, &fi->inmem_pages, list)
			if (cur->page == page) {
				cur->page = newpage;
				break;
			}
		mutex_unlock(&fi->inmem_lock);
		put_page(page);
		get_page(newpage);
	}

	if (PagePrivate(page)) {
		f2fs_set_page_private(newpage, page_private(page));
		f2fs_clear_page_private(page);
	}

	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);

	return MIGRATEPAGE_SUCCESS;
}
#endif

#ifdef CONFIG_SWAP
/* Copied from generic_swapfile_activate() to check any holes */
static int check_swap_activate(struct file *swap_file, unsigned int max)
{
	struct address_space *mapping = swap_file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned blocks_per_page;
	unsigned long page_no;
	unsigned blkbits;
	sector_t probe_block;
	sector_t last_block;
	sector_t lowest_block = -1;
	sector_t highest_block = 0;

	blkbits = inode->i_blkbits;
	blocks_per_page = PAGE_SIZE >> blkbits;

	/*
	 * Map all the blocks into the extent list. This code doesn't try
	 * to be very smart.
	 */
	probe_block = 0;
	page_no = 0;
	last_block = i_size_read(inode) >> blkbits;
	while ((probe_block + blocks_per_page) <= last_block && page_no < max) {
		unsigned block_in_page;
		sector_t first_block;

		cond_resched();

		first_block = bmap(inode, probe_block);
		if (first_block == 0)
			goto bad_bmap;

		/*
		 * It must be PAGE_SIZE aligned on-disk
		 */
		if (first_block & (blocks_per_page - 1)) {
			probe_block++;
			goto reprobe;
		}

		for (block_in_page = 1; block_in_page < blocks_per_page;
					block_in_page++) {
			sector_t block;

			block = bmap(inode, probe_block + block_in_page);
			if (block == 0)
				goto bad_bmap;
			if (block != first_block + block_in_page) {
				/* Discontiguity */
				probe_block++;
				goto reprobe;
			}
		}

		first_block >>= (PAGE_SHIFT - blkbits);
		if (page_no) {	/* exclude the header page */
			if (first_block < lowest_block)
				lowest_block = first_block;
			if (first_block > highest_block)
				highest_block = first_block;
		}

		page_no++;
		probe_block += blocks_per_page;
reprobe:
		continue;
	}
	return 0;

bad_bmap:
	pr_err("swapon: swapfile has holes\n");
	return -EINVAL;
}
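
/*
 * swapon() support: the file must be a regular, non-inline file on a
 * writable fs with no holes; it is then pinned (FI_PIN_FILE) so its
 * blocks stay in place while in use as swap.
 */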
static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file,
				sector_t *span)
{
	struct inode *inode = file_inode(file);
	int ret;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (f2fs_readonly(F2FS_I_SB(inode)->sb))
		return -EROFS;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	ret = check_swap_activate(file, sis->max);
	if (ret)
		return ret;

	set_inode_flag(inode, FI_PIN_FILE);
	f2fs_precache_extents(inode);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return 0;
}

static void f2fs_swap_deactivate(struct file *file)
{
	struct inode *inode = file_inode(file);

	clear_inode_flag(inode, FI_PIN_FILE);
}
#else
static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file,
				sector_t *span)
{
	return -EOPNOTSUPP;
}

static void f2fs_swap_deactivate(struct file *file)
{
}
#endif
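
/*
 * Address space operations for regular data inodes.
 */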
const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readpages	= f2fs_read_data_pages,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= f2fs_write_end,
	.set_page_dirty	= f2fs_set_data_page_dirty,
	.invalidatepage	= f2fs_invalidate_page,
	.releasepage	= f2fs_release_page,
	.direct_IO	= f2fs_direct_IO,
	.bmap		= f2fs_bmap,
	.swap_activate	= f2fs_swap_activate,
	.swap_deactivate = f2fs_swap_deactivate,
#ifdef CONFIG_MIGRATION
	.migratepage	= f2fs_migrate_page,
#endif
};
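
/*
 * Clear only the PAGECACHE_TAG_DIRTY xarray tag for @page; the page's
 * own dirty flag is left untouched.
 */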
void f2fs_clear_page_cache_dirty_tag(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	unsigned long flags;

	xa_lock_irqsave(&mapping->i_pages, flags);
	__xa_clear_mark(&mapping->i_pages, page_index(page),
						PAGECACHE_TAG_DIRTY);
	xa_unlock_irqrestore(&mapping->i_pages, flags);
}
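
/*
 * The post-read context pool is preallocated so that contexts for
 * post-read processing (e.g. decryption) can still be obtained on the
 * read completion path under memory pressure.
 */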
int __init f2fs_init_post_read_processing(void)
{
	bio_post_read_ctx_cache = KMEM_CACHE(bio_post_read_ctx, 0);
	if (!bio_post_read_ctx_cache)
		goto fail;
	bio_post_read_ctx_pool =
		mempool_create_slab_pool(NUM_PREALLOC_POST_READ_CTXS,
					 bio_post_read_ctx_cache);
	if (!bio_post_read_ctx_pool)
		goto fail_free_cache;
	return 0;

fail_free_cache:
	kmem_cache_destroy(bio_post_read_ctx_cache);
fail:
	return -ENOMEM;
}

void __exit f2fs_destroy_post_read_processing(void)
{
	mempool_destroy(bio_post_read_ctx_pool);
	kmem_cache_destroy(bio_post_read_ctx_cache);
}