// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/blk-crypto.h>
#include <linux/swap.h>
#include <linux/prefetch.h>
#include <linux/uio.h>
#include <linux/cleancache.h>
#include <linux/sched/signal.h>
#include <linux/fiemap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "iostat.h"
#include <trace/events/f2fs.h>

#define NUM_PREALLOC_POST_READ_CTXS	128

static struct kmem_cache *bio_post_read_ctx_cache;
static struct kmem_cache *bio_entry_slab;
static mempool_t *bio_post_read_ctx_pool;
static struct bio_set f2fs_bioset;

#define	F2FS_BIO_POOL_SIZE	NR_CURSEG_TYPE
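
/* Set up the dedicated bio_set from which all f2fs bios are allocated. */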
int __init f2fs_init_bioset(void)
{
	if (bioset_init(&f2fs_bioset, F2FS_BIO_POOL_SIZE,
					0, BIOSET_NEED_BVECS))
		return -ENOMEM;
	return 0;
}

void f2fs_destroy_bioset(void)
{
	bioset_exit(&f2fs_bioset);
}
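
/*
 * Return true if writeback of this page is guaranteed to be persisted by
 * a checkpoint: meta/node pages, directory data, atomic or quota file
 * data, and pages being migrated by GC.
 */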
static bool __is_cp_guaranteed(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode;
	struct f2fs_sb_info *sbi;

	if (!mapping)
		return false;

	inode = mapping->host;
	sbi = F2FS_I_SB(inode);

	if (inode->i_ino == F2FS_META_INO(sbi) ||
			inode->i_ino == F2FS_NODE_INO(sbi) ||
			S_ISDIR(inode->i_mode))
		return true;

	if (f2fs_is_compressed_page(page))
		return false;
	if ((S_ISREG(inode->i_mode) &&
			(f2fs_is_atomic_file(inode) || IS_NOQUOTA(inode))) ||
			page_private_gcing(page))
		return true;
	return false;
}

static enum count_type __read_io_type(struct page *page)
{
	struct address_space *mapping = page_file_mapping(page);

	if (mapping) {
		struct inode *inode = mapping->host;
		struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

		if (inode->i_ino == F2FS_META_INO(sbi))
			return F2FS_RD_META;

		if (inode->i_ino == F2FS_NODE_INO(sbi))
			return F2FS_RD_NODE;
	}
	return F2FS_RD_DATA;
}

/* postprocessing steps for read bios */
enum bio_post_read_step {
#ifdef CONFIG_FS_ENCRYPTION
	STEP_DECRYPT	= 1 << 0,
#else
	STEP_DECRYPT	= 0,	/* compile out the decryption-related code */
#endif
#ifdef CONFIG_F2FS_FS_COMPRESSION
	STEP_DECOMPRESS	= 1 << 1,
#else
	STEP_DECOMPRESS	= 0,	/* compile out the decompression-related code */
#endif
#ifdef CONFIG_FS_VERITY
	STEP_VERITY	= 1 << 2,
#else
	STEP_VERITY	= 0,	/* compile out the verity-related code */
#endif
};

struct bio_post_read_ctx {
	struct bio *bio;
	struct f2fs_sb_info *sbi;
	struct work_struct work;
	unsigned int enabled_steps;
	block_t fs_blkaddr;
};

static void f2fs_finish_read_bio(struct bio *bio)
{
	struct bio_vec *bv;
	struct bvec_iter_all iter_all;

	/*
	 * Update and unlock the bio's pagecache pages, and put the
	 * decompression context for any compressed pages.
	 */
	bio_for_each_segment_all(bv, bio, iter_all) {
		struct page *page = bv->bv_page;

		if (f2fs_is_compressed_page(page)) {
			if (bio->bi_status)
				f2fs_end_read_compressed_page(page, true, 0);
			f2fs_put_page_dic(page);
			continue;
		}

		/* PG_error was set if decryption or verity failed. */
		if (bio->bi_status || PageError(page)) {
			ClearPageUptodate(page);
			/* will re-read again later */
			ClearPageError(page);
		} else {
			SetPageUptodate(page);
		}
		dec_page_count(F2FS_P_SB(page), __read_io_type(page));
		unlock_page(page);
	}

	if (bio->bi_private)
		mempool_free(bio->bi_private, bio_post_read_ctx_pool);
	bio_put(bio);
}

static void f2fs_verify_bio(struct work_struct *work)
{
	struct bio_post_read_ctx *ctx =
		container_of(work, struct bio_post_read_ctx, work);
	struct bio *bio = ctx->bio;
	bool may_have_compressed_pages = (ctx->enabled_steps & STEP_DECOMPRESS);

	/*
	 * fsverity_verify_bio() may call readpages() again, and while verity
	 * will be disabled for this, decryption and/or decompression may still
	 * be needed, resulting in another bio_post_read_ctx being allocated.
	 * So to prevent deadlocks we need to release the current ctx to the
	 * mempool first.  This assumes that verity is the last post-read step.
	 */
	mempool_free(ctx, bio_post_read_ctx_pool);
	bio->bi_private = NULL;

	/*
	 * Verify the bio's pages with fs-verity.  Exclude compressed pages,
	 * as those were handled separately by f2fs_end_read_compressed_page().
	 */
	if (may_have_compressed_pages) {
		struct bio_vec *bv;
		struct bvec_iter_all iter_all;

		bio_for_each_segment_all(bv, bio, iter_all) {
			struct page *page = bv->bv_page;

			if (!f2fs_is_compressed_page(page) &&
			    !PageError(page) && !fsverity_verify_page(page))
				SetPageError(page);
		}
	} else {
		fsverity_verify_bio(bio);
	}

	f2fs_finish_read_bio(bio);
}

/*
 * If the bio's data needs to be verified with fs-verity, then enqueue the
 * verity work for the bio.  Otherwise finish the bio now.
 *
 * Note that to avoid deadlocks, the verity work can't be done on the
 * decryption/decompression workqueue.  This is because verifying the data
 * pages can involve reading verity metadata pages from the file, and these
 * verity metadata pages may be encrypted and/or compressed.
 */
static void f2fs_verify_and_finish_bio(struct bio *bio)
{
	struct bio_post_read_ctx *ctx = bio->bi_private;

	if (ctx && (ctx->enabled_steps & STEP_VERITY)) {
		INIT_WORK(&ctx->work, f2fs_verify_bio);
		fsverity_enqueue_verify_work(&ctx->work);
	} else {
		f2fs_finish_read_bio(bio);
	}
}

/*
 * Handle STEP_DECOMPRESS by decompressing any compressed clusters whose last
 * remaining page was read by @ctx->bio.
 *
 * Note that a bio may span clusters (even a mix of compressed and uncompressed
 * clusters) or be for just part of a cluster.  STEP_DECOMPRESS just indicates
 * that the bio includes at least one compressed page.  The actual decompression
 * is done on a per-cluster basis, not a per-bio basis.
 */
static void f2fs_handle_step_decompress(struct bio_post_read_ctx *ctx)
{
	struct bio_vec *bv;
	struct bvec_iter_all iter_all;
	bool all_compressed = true;
	block_t blkaddr = ctx->fs_blkaddr;

	bio_for_each_segment_all(bv, ctx->bio, iter_all) {
		struct page *page = bv->bv_page;

		/* PG_error was set if decryption failed. */
		if (f2fs_is_compressed_page(page))
			f2fs_end_read_compressed_page(page, PageError(page),
						blkaddr);
		else
			all_compressed = false;

		blkaddr++;
	}

	/*
	 * Optimization: if all the bio's pages are compressed, then scheduling
	 * the per-bio verity work is unnecessary, as verity will be fully
	 * handled at the compression cluster level.
	 */
	if (all_compressed)
		ctx->enabled_steps &= ~STEP_VERITY;
}
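
/* Work item for the post-read steps that must run in process context. */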
static void f2fs_post_read_work(struct work_struct *work)
{
	struct bio_post_read_ctx *ctx =
		container_of(work, struct bio_post_read_ctx, work);

	if (ctx->enabled_steps & STEP_DECRYPT)
		fscrypt_decrypt_bio(ctx->bio);

	if (ctx->enabled_steps & STEP_DECOMPRESS)
		f2fs_handle_step_decompress(ctx);

	f2fs_verify_and_finish_bio(ctx->bio);
}

static void f2fs_read_end_io(struct bio *bio)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(bio_first_page_all(bio));
	struct bio_post_read_ctx *ctx;

	iostat_update_and_unbind_ctx(bio, 0);
	ctx = bio->bi_private;

	if (time_to_inject(sbi, FAULT_READ_IO)) {
		f2fs_show_injection_info(sbi, FAULT_READ_IO);
		bio->bi_status = BLK_STS_IOERR;
	}

	if (bio->bi_status) {
		f2fs_finish_read_bio(bio);
		return;
	}

	if (ctx && (ctx->enabled_steps & (STEP_DECRYPT | STEP_DECOMPRESS))) {
		INIT_WORK(&ctx->work, f2fs_post_read_work);
		queue_work(ctx->sbi->post_read_wq, &ctx->work);
	} else {
		f2fs_verify_and_finish_bio(bio);
	}
}
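
/*
 * Completion handler for write bios: release dummy alignment pages, end
 * page writeback, and stop checkpointing on critical I/O errors.
 */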
static void f2fs_write_end_io(struct bio *bio)
{
	struct f2fs_sb_info *sbi;
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	iostat_update_and_unbind_ctx(bio, 1);
	sbi = bio->bi_private;

	if (time_to_inject(sbi, FAULT_WRITE_IO)) {
		f2fs_show_injection_info(sbi, FAULT_WRITE_IO);
		bio->bi_status = BLK_STS_IOERR;
	}

	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct page *page = bvec->bv_page;
		enum count_type type = WB_DATA_TYPE(page);

		if (page_private_dummy(page)) {
			clear_page_private_dummy(page);
			unlock_page(page);
			mempool_free(page, sbi->write_io_dummy);

			if (unlikely(bio->bi_status))
				f2fs_stop_checkpoint(sbi, true);
			continue;
		}

		fscrypt_finalize_bounce_page(&page);

#ifdef CONFIG_F2FS_FS_COMPRESSION
		if (f2fs_is_compressed_page(page)) {
			f2fs_compress_write_end_io(bio, page);
			continue;
		}
#endif

		if (unlikely(bio->bi_status)) {
			mapping_set_error(page->mapping, -EIO);
			if (type == F2FS_WB_CP_DATA)
				f2fs_stop_checkpoint(sbi, true);
		}

		f2fs_bug_on(sbi, page->mapping == NODE_MAPPING(sbi) &&
					page->index != nid_of_node(page));

		dec_page_count(sbi, type);
		if (f2fs_in_warm_node_list(sbi, page))
			f2fs_del_fsync_node_entry(sbi, page);
		clear_page_private_gcing(page);
		end_page_writeback(page);
	}
	if (!get_pages(sbi, F2FS_WB_CP_DATA) &&
				wq_has_sleeper(&sbi->cp_wait))
		wake_up(&sbi->cp_wait);

	bio_put(bio);
}
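
/*
 * Map @blk_addr to the backing device that covers it (for multi-device
 * volumes) and, if @bio is given, point the bio at that device and sector.
 */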
struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
				block_t blk_addr, struct bio *bio)
{
	struct block_device *bdev = sbi->sb->s_bdev;
	int i;

	if (f2fs_is_multi_device(sbi)) {
		for (i = 0; i < sbi->s_ndevs; i++) {
			if (FDEV(i).start_blk <= blk_addr &&
			    FDEV(i).end_blk >= blk_addr) {
				blk_addr -= FDEV(i).start_blk;
				bdev = FDEV(i).bdev;
				break;
			}
		}
	}
	if (bio) {
		bio_set_dev(bio, bdev);
		bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
	}
	return bdev;
}

int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	int i;

	if (!f2fs_is_multi_device(sbi))
		return 0;

	for (i = 0; i < sbi->s_ndevs; i++)
		if (FDEV(i).start_blk <= blkaddr && FDEV(i).end_blk >= blkaddr)
			return i;
	return 0;
}

static struct bio *__bio_alloc(struct f2fs_io_info *fio, int npages)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	struct bio *bio;

	bio = bio_alloc_bioset(GFP_NOIO, npages, &f2fs_bioset);

	f2fs_target_device(sbi, fio->new_blkaddr, bio);
	if (is_read_io(fio->op)) {
		bio->bi_end_io = f2fs_read_end_io;
		bio->bi_private = NULL;
	} else {
		bio->bi_end_io = f2fs_write_end_io;
		bio->bi_private = sbi;
		bio->bi_write_hint = f2fs_io_type_to_rw_hint(sbi,
						fio->type, fio->temp);
	}
	iostat_alloc_and_bind_ctx(sbi, bio, NULL);

	if (fio->io_wbc)
		wbc_init_bio(fio->io_wbc, bio);

	return bio;
}
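
/*
 * Attach an fscrypt (inline crypto) context to @bio, unless the GC is
 * doing raw I/O through ->encrypted_page and must bypass encryption.
 */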
static void f2fs_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode,
				  pgoff_t first_idx,
				  const struct f2fs_io_info *fio,
				  gfp_t gfp_mask)
{
	/*
	 * The f2fs garbage collector sets ->encrypted_page when it wants to
	 * read/write raw data without encryption.
	 */
	if (!fio || !fio->encrypted_page)
		fscrypt_set_bio_crypt_ctx(bio, inode, first_idx, gfp_mask);
}

static bool f2fs_crypt_mergeable_bio(struct bio *bio, const struct inode *inode,
				     pgoff_t next_idx,
				     const struct f2fs_io_info *fio)
{
	/*
	 * The f2fs garbage collector sets ->encrypted_page when it wants to
	 * read/write raw data without encryption.
	 */
	if (fio && fio->encrypted_page)
		return !bio_has_crypt_ctx(bio);

	return fscrypt_mergeable_bio(bio, inode, next_idx);
}

static inline void __submit_bio(struct f2fs_sb_info *sbi,
				struct bio *bio, enum page_type type)
{
	if (!is_read_io(bio_op(bio))) {
		unsigned int start;

		if (type != DATA && type != NODE)
			goto submit_io;

		if (f2fs_lfs_mode(sbi) && current->plug)
			blk_finish_plug(current->plug);

		if (!F2FS_IO_ALIGNED(sbi))
			goto submit_io;

		start = bio->bi_iter.bi_size >> F2FS_BLKSIZE_BITS;
		start %= F2FS_IO_SIZE(sbi);

		if (start == 0)
			goto submit_io;

		/* fill dummy pages */
		for (; start < F2FS_IO_SIZE(sbi); start++) {
			struct page *page =
				mempool_alloc(sbi->write_io_dummy,
					      GFP_NOIO | __GFP_NOFAIL);
			f2fs_bug_on(sbi, !page);

			lock_page(page);

			zero_user_segment(page, 0, PAGE_SIZE);
			set_page_private_dummy(page);

			if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE)
				f2fs_bug_on(sbi, 1);
		}
		/*
		 * In the NODE case, we lose the next block address chain.
		 * So, we need to do a checkpoint in f2fs_sync_file.
		 */
		if (type == NODE)
			set_sbi_flag(sbi, SBI_NEED_CP);
	}
submit_io:
	if (is_read_io(bio_op(bio)))
		trace_f2fs_submit_read_bio(sbi->sb, type, bio);
	else
		trace_f2fs_submit_write_bio(sbi->sb, type, bio);

	iostat_update_submit_ctx(bio, type);
	submit_bio(bio);
}

void f2fs_submit_bio(struct f2fs_sb_info *sbi,
				struct bio *bio, enum page_type type)
{
	__submit_bio(sbi, bio, type);
}

static void __attach_io_flag(struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	unsigned int temp_mask = (1 << NR_TEMP_TYPE) - 1;
	unsigned int io_flag, fua_flag, meta_flag;

	if (fio->type == DATA)
		io_flag = sbi->data_io_flag;
	else if (fio->type == NODE)
		io_flag = sbi->node_io_flag;
	else
		return;

	fua_flag = io_flag & temp_mask;
	meta_flag = (io_flag >> NR_TEMP_TYPE) & temp_mask;

	/*
	 * data/node io flag bits per temp:
	 *      REQ_META     |      REQ_FUA      |
	 *    5 |    4 |   3 |    2 |    1 |   0 |
	 * Cold | Warm | Hot | Cold | Warm | Hot |
	 */
	if ((1 << fio->temp) & meta_flag)
		fio->op_flags |= REQ_META;
	if ((1 << fio->temp) & fua_flag)
		fio->op_flags |= REQ_FUA;
}

static void __submit_merged_bio(struct f2fs_bio_info *io)
{
	struct f2fs_io_info *fio = &io->fio;

	if (!io->bio)
		return;

	__attach_io_flag(fio);
	bio_set_op_attrs(io->bio, fio->op, fio->op_flags);

	if (is_read_io(fio->op))
		trace_f2fs_prepare_read_bio(io->sbi->sb, fio->type, io->bio);
	else
		trace_f2fs_prepare_write_bio(io->sbi->sb, fio->type, io->bio);

	__submit_bio(io->sbi, io->bio, fio->type);
	io->bio = NULL;
}
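
/*
 * Check whether @bio already carries a page of @inode, the exact @page,
 * or a node page with inode number @ino; with no filter given, any
 * non-empty bio counts as a match.
 */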
static bool __has_merged_page(struct bio *bio, struct inode *inode,
						struct page *page, nid_t ino)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	if (!bio)
		return false;

	if (!inode && !page && !ino)
		return true;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct page *target = bvec->bv_page;

		if (fscrypt_is_bounce_page(target)) {
			target = fscrypt_pagecache_page(target);
			if (IS_ERR(target))
				continue;
		}
		if (f2fs_is_compressed_page(target)) {
			target = f2fs_compress_control_page(target);
			if (IS_ERR(target))
				continue;
		}

		if (inode && inode == target->mapping->host)
			return true;
		if (page && page == target)
			return true;
		if (ino && ino == ino_of_node(target))
			return true;
	}

	return false;
}

static void __f2fs_submit_merged_write(struct f2fs_sb_info *sbi,
				enum page_type type, enum temp_type temp)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io = sbi->write_io[btype] + temp;

	down_write(&io->io_rwsem);

	/* change META to META_FLUSH in the checkpoint procedure */
	if (type >= META_FLUSH) {
		io->fio.type = META_FLUSH;
		io->fio.op = REQ_OP_WRITE;
		io->fio.op_flags = REQ_META | REQ_PRIO | REQ_SYNC;
		if (!test_opt(sbi, NOBARRIER))
			io->fio.op_flags |= REQ_PREFLUSH | REQ_FUA;
	}
	__submit_merged_bio(io);
	up_write(&io->io_rwsem);
}

static void __submit_merged_write_cond(struct f2fs_sb_info *sbi,
				struct inode *inode, struct page *page,
				nid_t ino, enum page_type type, bool force)
{
	enum temp_type temp;
	bool ret = true;

	for (temp = HOT; temp < NR_TEMP_TYPE; temp++) {
		if (!force)	{
			enum page_type btype = PAGE_TYPE_OF_BIO(type);
			struct f2fs_bio_info *io = sbi->write_io[btype] + temp;

			down_read(&io->io_rwsem);
			ret = __has_merged_page(io->bio, inode, page, ino);
			up_read(&io->io_rwsem);
		}
		if (ret)
			__f2fs_submit_merged_write(sbi, type, temp);

		/* TODO: use HOT temp only for meta pages now. */
		if (type >= META)
			break;
	}
}

void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type)
{
	__submit_merged_write_cond(sbi, NULL, NULL, 0, type, true);
}

void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
				struct inode *inode, struct page *page,
				nid_t ino, enum page_type type)
{
	__submit_merged_write_cond(sbi, inode, page, ino, type, false);
}

void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi)
{
	f2fs_submit_merged_write(sbi, DATA);
	f2fs_submit_merged_write(sbi, NODE);
	f2fs_submit_merged_write(sbi, META);
}

/*
 * Fill the locked page with data located in the block address.
 * A caller needs to unlock the page on failure.
 */
int f2fs_submit_page_bio(struct f2fs_io_info *fio)
{
	struct bio *bio;
	struct page *page = fio->encrypted_page ?
			fio->encrypted_page : fio->page;

	if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
			fio->is_por ? META_POR : (__is_meta_io(fio) ?
			META_GENERIC : DATA_GENERIC_ENHANCE)))
		return -EFSCORRUPTED;

	trace_f2fs_submit_page_bio(page, fio);

	/* Allocate a new bio */
	bio = __bio_alloc(fio, 1);

	f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
			       fio->page->index, fio, GFP_NOIO);

	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		bio_put(bio);
		return -EFAULT;
	}

	if (fio->io_wbc && !is_read_io(fio->op))
		wbc_account_cgroup_owner(fio->io_wbc, page, PAGE_SIZE);

	__attach_io_flag(fio);
	bio_set_op_attrs(bio, fio->op, fio->op_flags);

	inc_page_count(fio->sbi, is_read_io(fio->op) ?
			__read_io_type(page) : WB_DATA_TYPE(fio->page));

	__submit_bio(fio->sbi, bio, fio->type);
	return 0;
}
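
/*
 * A page can be merged into an existing bio only if the bio hasn't hit
 * the configured size limit, the new block is physically contiguous with
 * the last one, and both map to the same backing device.
 */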
static bool page_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio,
				block_t last_blkaddr, block_t cur_blkaddr)
{
	if (unlikely(sbi->max_io_bytes &&
			bio->bi_iter.bi_size >= sbi->max_io_bytes))
		return false;
	if (last_blkaddr + 1 != cur_blkaddr)
		return false;
	return bio->bi_bdev == f2fs_target_device(sbi, cur_blkaddr, NULL);
}

static bool io_type_is_mergeable(struct f2fs_bio_info *io,
						struct f2fs_io_info *fio)
{
	if (io->fio.op != fio->op)
		return false;
	return io->fio.op_flags == fio->op_flags;
}

static bool io_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio,
					struct f2fs_bio_info *io,
					struct f2fs_io_info *fio,
					block_t last_blkaddr,
					block_t cur_blkaddr)
{
	if (F2FS_IO_ALIGNED(sbi) && (fio->type == DATA || fio->type == NODE)) {
		unsigned int filled_blocks =
				F2FS_BYTES_TO_BLK(bio->bi_iter.bi_size);
		unsigned int io_size = F2FS_IO_SIZE(sbi);
		unsigned int left_vecs = bio->bi_max_vecs - bio->bi_vcnt;

		/* IOs in the bio are aligned and there is not enough vector space left */
		if (!(filled_blocks % io_size) && left_vecs < io_size)
			return false;
	}
	if (!page_is_mergeable(sbi, bio, last_blkaddr, cur_blkaddr))
		return false;
	return io_type_is_mergeable(io, fio);
}

static void add_bio_entry(struct f2fs_sb_info *sbi, struct bio *bio,
				struct page *page, enum temp_type temp)
{
	struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
	struct bio_entry *be;

	be = f2fs_kmem_cache_alloc(bio_entry_slab, GFP_NOFS, true, NULL);
	be->bio = bio;
	bio_get(bio);

	if (bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE)
		f2fs_bug_on(sbi, 1);

	down_write(&io->bio_list_lock);
	list_add_tail(&be->list, &io->bio_list);
	up_write(&io->bio_list_lock);
}

static void del_bio_entry(struct bio_entry *be)
{
	list_del(&be->list);
	kmem_cache_free(bio_entry_slab, be);
}
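
/*
 * Try to merge an in-place-update page into one of the bios cached on the
 * per-temperature write lists; if the page can't be added, submit that bio
 * and report -EAGAIN so the caller allocates a fresh one.
 */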
static int add_ipu_page(struct f2fs_io_info *fio, struct bio **bio,
							struct page *page)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	enum temp_type temp;
	bool found = false;
	int ret = -EAGAIN;

	for (temp = HOT; temp < NR_TEMP_TYPE && !found; temp++) {
		struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
		struct list_head *head = &io->bio_list;
		struct bio_entry *be;

		down_write(&io->bio_list_lock);
		list_for_each_entry(be, head, list) {
			if (be->bio != *bio)
				continue;

			found = true;

			f2fs_bug_on(sbi, !page_is_mergeable(sbi, *bio,
							    *fio->last_block,
							    fio->new_blkaddr));
			if (f2fs_crypt_mergeable_bio(*bio,
					fio->page->mapping->host,
					fio->page->index, fio) &&
			    bio_add_page(*bio, page, PAGE_SIZE, 0) ==
					PAGE_SIZE) {
				ret = 0;
				break;
			}

			/* page can't be merged into bio; submit the bio */
			del_bio_entry(be);
			__submit_bio(sbi, *bio, DATA);
			break;
		}
		up_write(&io->bio_list_lock);
	}

	if (ret) {
		bio_put(*bio);
		*bio = NULL;
	}

	return ret;
}

void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
					struct bio **bio, struct page *page)
{
	enum temp_type temp;
	bool found = false;
	struct bio *target = bio ? *bio : NULL;

	for (temp = HOT; temp < NR_TEMP_TYPE && !found; temp++) {
		struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
		struct list_head *head = &io->bio_list;
		struct bio_entry *be;

		if (list_empty(head))
			continue;

		down_read(&io->bio_list_lock);
		list_for_each_entry(be, head, list) {
			if (target)
				found = (target == be->bio);
			else
				found = __has_merged_page(be->bio, NULL,
								page, 0);
			if (found)
				break;
		}
		up_read(&io->bio_list_lock);

		if (!found)
			continue;

		found = false;

		down_write(&io->bio_list_lock);
		list_for_each_entry(be, head, list) {
			if (target)
				found = (target == be->bio);
			else
				found = __has_merged_page(be->bio, NULL,
								page, 0);
			if (found) {
				target = be->bio;
				del_bio_entry(be);
				break;
			}
		}
		up_write(&io->bio_list_lock);
	}

	if (found)
		__submit_bio(sbi, target, DATA);
	if (bio && *bio) {
		bio_put(*bio);
		*bio = NULL;
	}
}

int f2fs_merge_page_bio(struct f2fs_io_info *fio)
{
	struct bio *bio = *fio->bio;
	struct page *page = fio->encrypted_page ?
			fio->encrypted_page : fio->page;

	if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
			__is_meta_io(fio) ? META_GENERIC : DATA_GENERIC))
		return -EFSCORRUPTED;

	trace_f2fs_submit_page_bio(page, fio);

	if (bio && !page_is_mergeable(fio->sbi, bio, *fio->last_block,
						fio->new_blkaddr))
		f2fs_submit_merged_ipu_write(fio->sbi, &bio, NULL);
alloc_new:
	if (!bio) {
		bio = __bio_alloc(fio, BIO_MAX_VECS);
		__attach_io_flag(fio);
		f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
				       fio->page->index, fio, GFP_NOIO);
		bio_set_op_attrs(bio, fio->op, fio->op_flags);

		add_bio_entry(fio->sbi, bio, page, fio->temp);
	} else {
		if (add_ipu_page(fio, &bio, page))
			goto alloc_new;
	}

	if (fio->io_wbc)
		wbc_account_cgroup_owner(fio->io_wbc, page, PAGE_SIZE);

	inc_page_count(fio->sbi, WB_DATA_TYPE(page));

	*fio->last_block = fio->new_blkaddr;
	*fio->bio = bio;

	return 0;
}

void f2fs_submit_page_write(struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
	struct f2fs_bio_info *io = sbi->write_io[btype] + fio->temp;
	struct page *bio_page;

	f2fs_bug_on(sbi, is_read_io(fio->op));

	down_write(&io->io_rwsem);
next:
	if (fio->in_list) {
		spin_lock(&io->io_lock);
		if (list_empty(&io->io_list)) {
			spin_unlock(&io->io_lock);
			goto out;
		}
		fio = list_first_entry(&io->io_list,
						struct f2fs_io_info, list);
		list_del(&fio->list);
		spin_unlock(&io->io_lock);
	}

	verify_fio_blkaddr(fio);

	if (fio->encrypted_page)
		bio_page = fio->encrypted_page;
	else if (fio->compressed_page)
		bio_page = fio->compressed_page;
	else
		bio_page = fio->page;

	/* set submitted = true as a return value */
	fio->submitted = true;

	inc_page_count(sbi, WB_DATA_TYPE(bio_page));

	if (io->bio &&
	    (!io_is_mergeable(sbi, io->bio, io, fio, io->last_block_in_bio,
			      fio->new_blkaddr) ||
	     !f2fs_crypt_mergeable_bio(io->bio, fio->page->mapping->host,
				       bio_page->index, fio)))
		__submit_merged_bio(io);
alloc_new:
	if (io->bio == NULL) {
		if (F2FS_IO_ALIGNED(sbi) &&
				(fio->type == DATA || fio->type == NODE) &&
				fio->new_blkaddr & F2FS_IO_SIZE_MASK(sbi)) {
			dec_page_count(sbi, WB_DATA_TYPE(bio_page));
			fio->retry = true;
			goto skip;
		}
		io->bio = __bio_alloc(fio, BIO_MAX_VECS);
		f2fs_set_bio_crypt_ctx(io->bio, fio->page->mapping->host,
				       bio_page->index, fio, GFP_NOIO);
		io->fio = *fio;
	}

	if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) < PAGE_SIZE) {
		__submit_merged_bio(io);
		goto alloc_new;
	}

	if (fio->io_wbc)
		wbc_account_cgroup_owner(fio->io_wbc, bio_page, PAGE_SIZE);

	io->last_block_in_bio = fio->new_blkaddr;

	trace_f2fs_submit_page_write(fio->page, fio);
skip:
	if (fio->in_list)
		goto next;
out:
	if (is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN) ||
				!f2fs_is_checkpoint_ready(sbi))
		__submit_merged_bio(io);
	up_write(&io->io_rwsem);
}
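
/*
 * Allocate a read bio for up to @nr_pages pages starting at @blkaddr, and
 * attach a bio_post_read_ctx when decryption, verity, or decompression
 * may be needed once the read completes.
 */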
static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
				      unsigned nr_pages, unsigned op_flag,
				      pgoff_t first_idx, bool for_write)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct bio *bio;
	struct bio_post_read_ctx *ctx = NULL;
	unsigned int post_read_steps = 0;

	bio = bio_alloc_bioset(for_write ? GFP_NOIO : GFP_KERNEL,
			       bio_max_segs(nr_pages), &f2fs_bioset);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	f2fs_set_bio_crypt_ctx(bio, inode, first_idx, NULL, GFP_NOFS);

	f2fs_target_device(sbi, blkaddr, bio);
	bio->bi_end_io = f2fs_read_end_io;
	bio_set_op_attrs(bio, REQ_OP_READ, op_flag);

	if (fscrypt_inode_uses_fs_layer_crypto(inode))
		post_read_steps |= STEP_DECRYPT;

	if (f2fs_need_verity(inode, first_idx))
		post_read_steps |= STEP_VERITY;

	/*
	 * STEP_DECOMPRESS is handled specially, since a compressed file might
	 * contain both compressed and uncompressed clusters.  We'll allocate a
	 * bio_post_read_ctx if the file is compressed, but the caller is
	 * responsible for enabling STEP_DECOMPRESS if it's actually needed.
	 */

	if (post_read_steps || f2fs_compressed_file(inode)) {
		/* Due to the mempool, this never fails. */
		ctx = mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);
		ctx->bio = bio;
		ctx->sbi = sbi;
		ctx->enabled_steps = post_read_steps;
		ctx->fs_blkaddr = blkaddr;
		bio->bi_private = ctx;
	}
	iostat_alloc_and_bind_ctx(sbi, bio, ctx);

	return bio;
}

/* This can handle encryption stuff */
static int f2fs_submit_page_read(struct inode *inode, struct page *page,
				 block_t blkaddr, int op_flags, bool for_write)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct bio *bio;

	bio = f2fs_grab_read_bio(inode, blkaddr, 1, op_flags,
					page->index, for_write);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	/* wait for GCed page writeback via META_MAPPING */
	f2fs_wait_on_block_writeback(inode, blkaddr);

	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		bio_put(bio);
		return -EFAULT;
	}
	ClearPageError(page);
	inc_page_count(sbi, F2FS_RD_DATA);
	f2fs_update_iostat(sbi, FS_DATA_READ_IO, F2FS_BLKSIZE);
	__submit_bio(sbi, bio, DATA);
	return 0;
}
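
/* Write dn->data_blkaddr into its on-disk slot inside the node page. */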
static void __set_data_blkaddr(struct dnode_of_data *dn)
{
	struct f2fs_node *rn = F2FS_NODE(dn->node_page);
	__le32 *addr_array;
	int base = 0;

	if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
		base = get_extra_isize(dn->inode);

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[base + dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
}

/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
void f2fs_set_data_blkaddr(struct dnode_of_data *dn)
{
	f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true);
	__set_data_blkaddr(dn);
	if (set_page_dirty(dn->node_page))
		dn->node_changed = true;
}

void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
{
	dn->data_blkaddr = blkaddr;
	f2fs_set_data_blkaddr(dn);
	f2fs_update_extent_cache(dn);
}

/* dn->ofs_in_node will be returned with up-to-date last block pointer */
int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	int err;

	if (!count)
		return 0;

	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
		return err;

	trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
						dn->ofs_in_node, count);

	f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true);

	for (; count > 0; dn->ofs_in_node++) {
		block_t blkaddr = f2fs_data_blkaddr(dn);

		if (blkaddr == NULL_ADDR) {
			dn->data_blkaddr = NEW_ADDR;
			__set_data_blkaddr(dn);
			count--;
		}
	}

	if (set_page_dirty(dn->node_page))
		dn->node_changed = true;
	return 0;
}

/* Should keep dn->ofs_in_node unchanged */
int f2fs_reserve_new_block(struct dnode_of_data *dn)
{
	unsigned int ofs_in_node = dn->ofs_in_node;
	int ret;

	ret = f2fs_reserve_new_blocks(dn, 1);
	dn->ofs_in_node = ofs_in_node;
	return ret;
}

int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
	bool need_put = dn->inode_page ? false : true;
	int err;

	err = f2fs_get_dnode_of_data(dn, index, ALLOC_NODE);
	if (err)
		return err;

	if (dn->data_blkaddr == NULL_ADDR)
		err = f2fs_reserve_new_block(dn);
	if (err || need_put)
		f2fs_put_dnode(dn);
	return err;
}

int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
{
	struct extent_info ei = {0, };
	struct inode *inode = dn->inode;

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn->data_blkaddr = ei.blk + index - ei.fofs;
		return 0;
	}

	return f2fs_reserve_block(dn, index);
}

struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
						int op_flags, bool for_write)
{
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	struct extent_info ei = {0, };
	int err;

	page = f2fs_grab_cache_page(mapping, index, for_write);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn.data_blkaddr = ei.blk + index - ei.fofs;
		if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), dn.data_blkaddr,
						DATA_GENERIC_ENHANCE_READ)) {
			err = -EFSCORRUPTED;
			goto put_err;
		}
		goto got_it;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		goto put_err;
	f2fs_put_dnode(&dn);

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		err = -ENOENT;
		goto put_err;
	}
	if (dn.data_blkaddr != NEW_ADDR &&
			!f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
						dn.data_blkaddr,
						DATA_GENERIC_ENHANCE)) {
		err = -EFSCORRUPTED;
		goto put_err;
	}
got_it:
	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	/*
	 * A new dentry page is allocated but not able to be written, since its
	 * new inode page couldn't be allocated due to -ENOSPC.
	 * In such a case, its blkaddr can remain NEW_ADDR.
	 * see, f2fs_add_link -> f2fs_get_new_data_page ->
	 * f2fs_init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
		if (!PageUptodate(page))
			SetPageUptodate(page);
		unlock_page(page);
		return page;
	}

	err = f2fs_submit_page_read(inode, page, dn.data_blkaddr,
						op_flags, for_write);
	if (err)
		goto put_err;
	return page;

put_err:
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}

struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);

	page = f2fs_get_read_data_page(inode, index, 0, false);
	if (IS_ERR(page))
		return page;

	if (PageUptodate(page))
		return page;

	wait_on_page_locked(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 0);
		return ERR_PTR(-EIO);
	}
	return page;
}

/*
 * If it tries to access a hole, return an error.
 * Because, the callers, functions in dir.c and GC, should be able to know
 * whether this page exists or not.
 */
struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
							bool for_write)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
repeat:
	page = f2fs_get_read_data_page(inode, index, 0, for_write);
	if (IS_ERR(page))
		return page;

	/* wait for read completion */
	lock_page(page);
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	return page;
}

/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 * Note that, ipage is set only by make_empty_dir, and if any error occurs,
 * ipage should be released by this function.
 */
struct page *f2fs_get_new_data_page(struct inode *inode,
		struct page *ipage, pgoff_t index, bool new_i_size)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;

	page = f2fs_grab_cache_page(mapping, index, true);
	if (!page) {
		/*
		 * before exiting, we should make sure ipage will be released
		 * if any error occurs.
		 */
		f2fs_put_page(ipage, 1);
		return ERR_PTR(-ENOMEM);
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, index);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	if (!ipage)
		f2fs_put_dnode(&dn);

	if (PageUptodate(page))
		goto got_it;

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
		if (!PageUptodate(page))
			SetPageUptodate(page);
	} else {
		f2fs_put_page(page, 1);

		/* if ipage exists, blkaddr should be NEW_ADDR */
		f2fs_bug_on(F2FS_I_SB(inode), ipage);
		page = f2fs_get_lock_data_page(inode, index, true);
		if (IS_ERR(page))
			return page;
	}
got_it:
	if (new_i_size && i_size_read(inode) <
				((loff_t)(index + 1) << PAGE_SHIFT))
		f2fs_i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT));
	return page;
}
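
/*
 * Allocate one new data block for @dn through the segment allocator and
 * propagate the new address to the node page and extent cache.
 */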
static int __allocate_data_block(struct dnode_of_data *dn, int seg_type)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_summary sum;
	struct node_info ni;
	block_t old_blkaddr;
	blkcnt_t count = 1;
	int err;

	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
		return -EPERM;

	err = f2fs_get_node_info(sbi, dn->nid, &ni);
	if (err)
		return err;

	dn->data_blkaddr = f2fs_data_blkaddr(dn);
	if (dn->data_blkaddr != NULL_ADDR)
		goto alloc;

	if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
		return err;

alloc:
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
	old_blkaddr = dn->data_blkaddr;
	f2fs_allocate_data_block(sbi, NULL, old_blkaddr, &dn->data_blkaddr,
				&sum, seg_type, NULL);
	if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) {
		invalidate_mapping_pages(META_MAPPING(sbi),
					old_blkaddr, old_blkaddr);
		f2fs_invalidate_compress_page(sbi, old_blkaddr);
	}
	f2fs_update_data_blkaddr(dn, dn->data_blkaddr);

	/*
	 * i_size will be updated by direct_IO. Otherwise, we'll get stale
	 * data from unwritten block via dio_read.
	 */
	return 0;
}

int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct f2fs_map_blocks map;
	int flag;
	int err = 0;
	bool direct_io = iocb->ki_flags & IOCB_DIRECT;

	map.m_lblk = F2FS_BLK_ALIGN(iocb->ki_pos);
	map.m_len = F2FS_BYTES_TO_BLK(iocb->ki_pos + iov_iter_count(from));
	if (map.m_len > map.m_lblk)
		map.m_len -= map.m_lblk;
	else
		map.m_len = 0;

	map.m_next_pgofs = NULL;
	map.m_next_extent = NULL;
	map.m_seg_type = NO_CHECK_TYPE;
	map.m_may_create = true;

	if (direct_io) {
		map.m_seg_type = f2fs_rw_hint_to_seg_type(iocb->ki_hint);
		flag = f2fs_force_buffered_io(inode, iocb, from) ?
					F2FS_GET_BLOCK_PRE_AIO :
					F2FS_GET_BLOCK_PRE_DIO;
		goto map_blocks;
	}
	if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA(inode)) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}
	if (f2fs_has_inline_data(inode))
		return err;

	flag = F2FS_GET_BLOCK_PRE_AIO;

map_blocks:
	err = f2fs_map_blocks(inode, &map, 1, flag);
	if (map.m_len > 0 && err == -ENOSPC) {
		if (!direct_io)
			set_inode_flag(inode, FI_NO_PREALLOC);
		err = 0;
	}
	return err;
}

void f2fs_do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock)
{
	if (flag == F2FS_GET_BLOCK_PRE_AIO) {
		if (lock)
			down_read(&sbi->node_change);
		else
			up_read(&sbi->node_change);
	} else {
		if (lock)
			f2fs_lock_op(sbi);
		else
			f2fs_unlock_op(sbi);
	}
}

/*
 * f2fs_map_blocks() tries to find or build mapping relationship which
 * maps continuous logical blocks to physical blocks, and return such
 * info via f2fs_map_blocks structure.
 */
int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
						int create, int flag)
{
	unsigned int maxblocks = map->m_len;
	struct dnode_of_data dn;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int mode = map->m_may_create ? ALLOC_NODE : LOOKUP_NODE;
	pgoff_t pgofs, end_offset, end;
	int err = 0, ofs = 1;
	unsigned int ofs_in_node, last_ofs_in_node;
	blkcnt_t prealloc;
	struct extent_info ei = {0, };
	block_t blkaddr;
	unsigned int start_pgofs;

	if (!maxblocks)
		return 0;

	map->m_len = 0;
	map->m_flags = 0;

	/* it only supports block size == page size */
	pgofs =	(pgoff_t)map->m_lblk;
	end = pgofs + maxblocks;

	if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
		if (f2fs_lfs_mode(sbi) && flag == F2FS_GET_BLOCK_DIO &&
							map->m_may_create)
			goto next_dnode;

		map->m_pblk = ei.blk + pgofs - ei.fofs;
		map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
		map->m_flags = F2FS_MAP_MAPPED;
		if (map->m_next_extent)
			*map->m_next_extent = pgofs + map->m_len;

		/* for hardware encryption, but to avoid potential issue in future */
		if (flag == F2FS_GET_BLOCK_DIO)
			f2fs_wait_on_block_writeback_range(inode,
						map->m_pblk, map->m_len);
		goto out;
	}

next_dnode:
	if (map->m_may_create)
		f2fs_do_map_lock(sbi, flag, true);

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, pgofs, mode);
	if (err) {
		if (flag == F2FS_GET_BLOCK_BMAP)
			map->m_pblk = 0;

		if (err == -ENOENT) {
			/*
			 * There is one exceptional case that read_node_page()
			 * may return -ENOENT due to filesystem has been
			 * shutdown or cp_error, so force to convert error
			 * number to EIO for such case.
			 */
			if (map->m_may_create &&
				(is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN) ||
				f2fs_cp_error(sbi))) {
				err = -EIO;
				goto unlock_out;
			}

			err = 0;
			if (map->m_next_pgofs)
				*map->m_next_pgofs =
					f2fs_get_next_page_offset(&dn, pgofs);
			if (map->m_next_extent)
				*map->m_next_extent =
					f2fs_get_next_page_offset(&dn, pgofs);
		}
		goto unlock_out;
	}

	start_pgofs = pgofs;
	prealloc = 0;
	last_ofs_in_node = ofs_in_node = dn.ofs_in_node;
	end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

next_block:
	blkaddr = f2fs_data_blkaddr(&dn);

	if (__is_valid_data_blkaddr(blkaddr) &&
		!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE)) {
		err = -EFSCORRUPTED;
		goto sync_out;
	}

	if (__is_valid_data_blkaddr(blkaddr)) {
		/* use out-of-place update for direct IO under LFS mode */
		if (f2fs_lfs_mode(sbi) && flag == F2FS_GET_BLOCK_DIO &&
							map->m_may_create) {
			err = __allocate_data_block(&dn, map->m_seg_type);
			if (err)
				goto sync_out;
			blkaddr = dn.data_blkaddr;
			set_inode_flag(inode, FI_APPEND_WRITE);
		}
	} else {
		if (create) {
			if (unlikely(f2fs_cp_error(sbi))) {
				err = -EIO;
				goto sync_out;
			}
			if (flag == F2FS_GET_BLOCK_PRE_AIO) {
				if (blkaddr == NULL_ADDR) {
					prealloc++;
					last_ofs_in_node = dn.ofs_in_node;
				}
			} else {
				WARN_ON(flag != F2FS_GET_BLOCK_PRE_DIO &&
					flag != F2FS_GET_BLOCK_DIO);
				err = __allocate_data_block(&dn,
							map->m_seg_type);
				if (!err)
					set_inode_flag(inode, FI_APPEND_WRITE);
			}
			if (err)
				goto sync_out;
			map->m_flags |= F2FS_MAP_NEW;
			blkaddr = dn.data_blkaddr;
		} else {
			if (f2fs_compressed_file(inode) &&
					f2fs_sanity_check_cluster(&dn) &&
					(flag != F2FS_GET_BLOCK_FIEMAP ||
					IS_ENABLED(CONFIG_F2FS_CHECK_FS))) {
				err = -EFSCORRUPTED;
				goto sync_out;
			}
			if (flag == F2FS_GET_BLOCK_BMAP) {
				map->m_pblk = 0;
				goto sync_out;
			}
			if (flag == F2FS_GET_BLOCK_PRECACHE)
				goto sync_out;
			if (flag == F2FS_GET_BLOCK_FIEMAP &&
						blkaddr == NULL_ADDR) {
				if (map->m_next_pgofs)
					*map->m_next_pgofs = pgofs + 1;
				goto sync_out;
			}
			if (flag != F2FS_GET_BLOCK_FIEMAP) {
				/* for defragment case */
				if (map->m_next_pgofs)
					*map->m_next_pgofs = pgofs + 1;
				goto sync_out;
			}
		}
	}

	if (flag == F2FS_GET_BLOCK_PRE_AIO)
		goto skip;

	if (map->m_len == 0) {
		/* preallocated unwritten block should be mapped for fiemap. */
		if (blkaddr == NEW_ADDR)
			map->m_flags |= F2FS_MAP_UNWRITTEN;
		map->m_flags |= F2FS_MAP_MAPPED;

		map->m_pblk = blkaddr;
		map->m_len = 1;
	} else if ((map->m_pblk != NEW_ADDR &&
			blkaddr == (map->m_pblk + ofs)) ||
			(map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) ||
			flag == F2FS_GET_BLOCK_PRE_DIO) {
		ofs++;
		map->m_len++;
	} else {
		goto sync_out;
	}

skip:
	dn.ofs_in_node++;
	pgofs++;

	/* preallocate blocks in batch for one dnode page */
	if (flag == F2FS_GET_BLOCK_PRE_AIO &&
			(pgofs == end || dn.ofs_in_node == end_offset)) {

		dn.ofs_in_node = ofs_in_node;
		err = f2fs_reserve_new_blocks(&dn, prealloc);
		if (err)
			goto sync_out;

		map->m_len += dn.ofs_in_node - ofs_in_node;
		if (prealloc && dn.ofs_in_node != last_ofs_in_node + 1) {
			err = -ENOSPC;
			goto sync_out;
		}
		dn.ofs_in_node = end_offset;
	}

	if (pgofs >= end)
		goto sync_out;
	else if (dn.ofs_in_node < end_offset)
		goto next_block;

	if (flag == F2FS_GET_BLOCK_PRECACHE) {
		if (map->m_flags & F2FS_MAP_MAPPED) {
			unsigned int ofs = start_pgofs - map->m_lblk;

			f2fs_update_extent_cache_range(&dn,
				start_pgofs, map->m_pblk + ofs,
				map->m_len - ofs);
		}
	}

	f2fs_put_dnode(&dn);

	if (map->m_may_create) {
		f2fs_do_map_lock(sbi, flag, false);
		f2fs_balance_fs(sbi, dn.node_changed);
	}
	goto next_dnode;

sync_out:

	/* for hardware encryption, but to avoid potential issue in future */
	if (flag == F2FS_GET_BLOCK_DIO && map->m_flags & F2FS_MAP_MAPPED)
		f2fs_wait_on_block_writeback_range(inode,
						map->m_pblk, map->m_len);

	if (flag == F2FS_GET_BLOCK_PRECACHE) {
		if (map->m_flags & F2FS_MAP_MAPPED) {
			unsigned int ofs = start_pgofs - map->m_lblk;

			f2fs_update_extent_cache_range(&dn,
				start_pgofs, map->m_pblk + ofs,
				map->m_len - ofs);
		}
		if (map->m_next_extent)
			*map->m_next_extent = pgofs + 1;
	}
	f2fs_put_dnode(&dn);
unlock_out:
	if (map->m_may_create) {
		f2fs_do_map_lock(sbi, flag, false);
		f2fs_balance_fs(sbi, dn.node_changed);
	}
out:
	trace_f2fs_map_blocks(inode, map, err);
	return err;
}
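
/* Return true if the whole range [pos, pos + len) is already mapped. */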
bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len)
{
	struct f2fs_map_blocks map;
	block_t last_lblk;
	int err;

	if (pos + len > i_size_read(inode))
		return false;

	map.m_lblk = F2FS_BYTES_TO_BLK(pos);
	map.m_next_pgofs = NULL;
	map.m_next_extent = NULL;
	map.m_seg_type = NO_CHECK_TYPE;
	map.m_may_create = false;
	last_lblk = F2FS_BLK_ALIGN(pos + len);

	while (map.m_lblk < last_lblk) {
		map.m_len = last_lblk - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
		if (err || map.m_len == 0)
			return false;
		map.m_lblk += map.m_len;
	}
	return true;
}

static inline u64 bytes_to_blks(struct inode *inode, u64 bytes)
{
	return (bytes >> inode->i_blkbits);
}

static inline u64 blks_to_bytes(struct inode *inode, u64 blks)
{
	return (blks << inode->i_blkbits);
}
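
/* buffer_head-based wrapper around f2fs_map_blocks() for the DIO path. */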
static int __get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh, int create, int flag,
			pgoff_t *next_pgofs, int seg_type, bool may_write)
{
	struct f2fs_map_blocks map;
	int err;

	map.m_lblk = iblock;
	map.m_len = bytes_to_blks(inode, bh->b_size);
	map.m_next_pgofs = next_pgofs;
	map.m_next_extent = NULL;
	map.m_seg_type = seg_type;
	map.m_may_create = may_write;

	err = f2fs_map_blocks(inode, &map, create, flag);
	if (!err) {
		map_bh(bh, inode->i_sb, map.m_pblk);
		bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
		bh->b_size = blks_to_bytes(inode, map.m_len);
	}
	return err;
}

static int get_data_block_dio_write(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	return __get_data_block(inode, iblock, bh_result, create,
				F2FS_GET_BLOCK_DIO, NULL,
				f2fs_rw_hint_to_seg_type(inode->i_write_hint),
				true);
}

static int get_data_block_dio(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	return __get_data_block(inode, iblock, bh_result, create,
				F2FS_GET_BLOCK_DIO, NULL,
				f2fs_rw_hint_to_seg_type(inode->i_write_hint),
				false);
}

static int f2fs_xattr_fiemap(struct inode *inode,
				struct fiemap_extent_info *fieinfo)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page;
	struct node_info ni;
	__u64 phys = 0, len;
	__u32 flags;
	nid_t xnid = F2FS_I(inode)->i_xattr_nid;
	int err = 0;

	if (f2fs_has_inline_xattr(inode)) {
		int offset;

		page = f2fs_grab_cache_page(NODE_MAPPING(sbi),
						inode->i_ino, false);
		if (!page)
			return -ENOMEM;

		err = f2fs_get_node_info(sbi, inode->i_ino, &ni);
		if (err) {
			f2fs_put_page(page, 1);
			return err;
		}

		phys = blks_to_bytes(inode, ni.blk_addr);
		offset = offsetof(struct f2fs_inode, i_addr) +
					sizeof(__le32) * (DEF_ADDRS_PER_INODE -
					get_inline_xattr_addrs(inode));

		phys += offset;
		len = inline_xattr_size(inode);

		f2fs_put_page(page, 1);

		flags = FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_NOT_ALIGNED;

		if (!xnid)
			flags |= FIEMAP_EXTENT_LAST;

		err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags);
		trace_f2fs_fiemap(inode, 0, phys, len, flags, err);
		if (err || err == 1)
			return err;
	}

	if (xnid) {
		page = f2fs_grab_cache_page(NODE_MAPPING(sbi), xnid, false);
		if (!page)
			return -ENOMEM;

		err = f2fs_get_node_info(sbi, xnid, &ni);
		if (err) {
			f2fs_put_page(page, 1);
			return err;
		}

		phys = blks_to_bytes(inode, ni.blk_addr);
		len = inode->i_sb->s_blocksize;

		f2fs_put_page(page, 1);

		flags = FIEMAP_EXTENT_LAST;
	}

	if (phys) {
		err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags);
		trace_f2fs_fiemap(inode, 0, phys, len, flags, err);
	}

	return (err < 0 ? err : 0);
}
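
/* Maximum number of data blocks addressable through an inode's index tree. */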
static loff_t max_inode_blocks(struct inode *inode)
{
	loff_t result = ADDRS_PER_INODE(inode);
	loff_t leaf_count = ADDRS_PER_BLOCK(inode);

	/* two direct node blocks */
	result += (leaf_count * 2);

	/* two indirect node blocks */
	leaf_count *= NIDS_PER_BLOCK;
	result += (leaf_count * 2);

	/* one double indirect node block */
	leaf_count *= NIDS_PER_BLOCK;
	result += leaf_count;

	return result;
}

int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len)
{
	struct f2fs_map_blocks map;
	sector_t start_blk, last_blk;
	pgoff_t next_pgofs;
	u64 logical = 0, phys = 0, size = 0;
	u32 flags = 0;
	int ret = 0;
	bool compr_cluster = false, compr_appended;
	unsigned int cluster_size = F2FS_I(inode)->i_cluster_size;
	unsigned int count_in_cluster = 0;
	loff_t maxbytes;

	if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
		ret = f2fs_precache_extents(inode);
		if (ret)
			return ret;
	}

	ret = fiemap_prep(inode, fieinfo, start, &len, FIEMAP_FLAG_XATTR);
	if (ret)
		return ret;

	inode_lock(inode);

	maxbytes = max_file_blocks(inode) << F2FS_BLKSIZE_BITS;
	if (start > maxbytes) {
		ret = -EFBIG;
		goto out;
	}

	if (len > maxbytes || (maxbytes - len) < start)
		len = maxbytes - start;

	if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
		ret = f2fs_xattr_fiemap(inode, fieinfo);
		goto out;
	}

	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) {
		ret = f2fs_inline_data_fiemap(inode, fieinfo, start, len);
		if (ret != -EAGAIN)
			goto out;
	}

	if (bytes_to_blks(inode, len) == 0)
		len = blks_to_bytes(inode, 1);

	start_blk = bytes_to_blks(inode, start);
	last_blk = bytes_to_blks(inode, start + len - 1);

next:
	memset(&map, 0, sizeof(map));
	map.m_lblk = start_blk;
	map.m_len = bytes_to_blks(inode, len);
	map.m_next_pgofs = &next_pgofs;
	map.m_seg_type = NO_CHECK_TYPE;

	if (compr_cluster) {
		map.m_lblk += 1;
		map.m_len = cluster_size - count_in_cluster;
	}

	ret = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_FIEMAP);
	if (ret)
		goto out;

	/* HOLE */
	if (!compr_cluster && !(map.m_flags & F2FS_MAP_FLAGS)) {
		start_blk = next_pgofs;

		if (blks_to_bytes(inode, start_blk) < blks_to_bytes(inode,
						max_inode_blocks(inode)))
			goto prep_next;

		flags |= FIEMAP_EXTENT_LAST;
	}

	compr_appended = false;
	/* In a case of compressed cluster, append this to the last extent */
	if (compr_cluster && ((map.m_flags & F2FS_MAP_UNWRITTEN) ||
			!(map.m_flags & F2FS_MAP_FLAGS))) {
		compr_appended = true;
		goto skip_fill;
	}

	if (size) {
		flags |= FIEMAP_EXTENT_MERGED;
		if (IS_ENCRYPTED(inode))
			flags |= FIEMAP_EXTENT_DATA_ENCRYPTED;

		ret = fiemap_fill_next_extent(fieinfo, logical,
				phys, size, flags);
		trace_f2fs_fiemap(inode, logical, phys, size, flags, ret);
		if (ret)
			goto out;
		size = 0;
	}

	if (start_blk > last_blk)
		goto out;

skip_fill:
	if (map.m_pblk == COMPRESS_ADDR) {
		compr_cluster = true;
		count_in_cluster = 1;
	} else if (compr_appended) {
		unsigned int appended_blks = cluster_size -
						count_in_cluster + 1;
		size += blks_to_bytes(inode, appended_blks);
		start_blk += appended_blks;
		compr_cluster = false;
	} else {
		logical = blks_to_bytes(inode, start_blk);
		phys = __is_valid_data_blkaddr(map.m_pblk) ?
			blks_to_bytes(inode, map.m_pblk) : 0;
		size = blks_to_bytes(inode, map.m_len);
		flags = 0;

		if (compr_cluster) {
			flags = FIEMAP_EXTENT_ENCODED;
			count_in_cluster += map.m_len;
			if (count_in_cluster == cluster_size) {
				compr_cluster = false;
				size += blks_to_bytes(inode, 1);
			}
		} else if (map.m_flags & F2FS_MAP_UNWRITTEN) {
			flags = FIEMAP_EXTENT_UNWRITTEN;
		}

		start_blk += bytes_to_blks(inode, size);
	}

prep_next:
	cond_resched();
	if (fatal_signal_pending(current))
		ret = -EINTR;
	else
		goto next;
out:
	if (ret == 1)
		ret = 0;

	inode_unlock(inode);
	return ret;
}
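
/*
 * Verity files may need reads past i_size (their Merkle-tree metadata is
 * stored beyond EOF), so use the superblock limit instead of i_size.
 */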
static inline loff_t f2fs_readpage_limit(struct inode *inode)
{
	if (IS_ENABLED(CONFIG_FS_VERITY) &&
	    (IS_VERITY(inode) || f2fs_verity_in_progress(inode)))
		return inode->i_sb->s_maxbytes;

	return i_size_read(inode);
}

static int f2fs_read_single_page(struct inode *inode, struct page *page,
					unsigned nr_pages,
					struct f2fs_map_blocks *map,
					struct bio **bio_ret,
					sector_t *last_block_in_bio,
					bool is_readahead)
{
	struct bio *bio = *bio_ret;
	const unsigned blocksize = blks_to_bytes(inode, 1);
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t block_nr;
	int ret = 0;

	block_in_file = (sector_t)page_index(page);
	last_block = block_in_file + nr_pages;
	last_block_in_file = bytes_to_blks(inode,
			f2fs_readpage_limit(inode) + blocksize - 1);
	if (last_block > last_block_in_file)
		last_block = last_block_in_file;

	/* just zeroing out page which is beyond EOF */
	if (block_in_file >= last_block)
		goto zero_out;
	/*
	 * Map blocks using the previous result first.
	 */
	if ((map->m_flags & F2FS_MAP_MAPPED) &&
			block_in_file > map->m_lblk &&
			block_in_file < (map->m_lblk + map->m_len))
		goto got_it;

	/*
	 * Then do more f2fs_map_blocks() calls until we are
	 * done with this page.
	 */
	map->m_lblk = block_in_file;
	map->m_len = last_block - block_in_file;

	ret = f2fs_map_blocks(inode, map, 0, F2FS_GET_BLOCK_DEFAULT);
	if (ret)
		goto out;
got_it:
	if ((map->m_flags & F2FS_MAP_MAPPED)) {
		block_nr = map->m_pblk + block_in_file - map->m_lblk;
		SetPageMappedToDisk(page);

		if (!PageUptodate(page) && (!PageSwapCache(page) &&
					!cleancache_get_page(page))) {
			SetPageUptodate(page);
			goto confused;
		}

		if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), block_nr,
						DATA_GENERIC_ENHANCE_READ)) {
			ret = -EFSCORRUPTED;
			goto out;
		}
	} else {
zero_out:
		zero_user_segment(page, 0, PAGE_SIZE);
		if (f2fs_need_verity(inode, page->index) &&
		    !fsverity_verify_page(page)) {
			ret = -EIO;
			goto out;
		}
		if (!PageUptodate(page))
			SetPageUptodate(page);
		unlock_page(page);
		goto out;
	}

	/*
	 * This page will go to BIO.  Do we need to send this
	 * BIO off first?
	 */
	if (bio && (!page_is_mergeable(F2FS_I_SB(inode), bio,
				       *last_block_in_bio, block_nr) ||
		    !f2fs_crypt_mergeable_bio(bio, inode, page->index, NULL))) {
submit_and_realloc:
		__submit_bio(F2FS_I_SB(inode), bio, DATA);
		bio = NULL;
	}
	if (bio == NULL) {
		bio = f2fs_grab_read_bio(inode, block_nr, nr_pages,
				is_readahead ? REQ_RAHEAD : 0, page->index,
				false);
		if (IS_ERR(bio)) {
			ret = PTR_ERR(bio);
			bio = NULL;
			goto out;
		}
	}

	/*
	 * If the page is under writeback, we need to wait for
	 * its completion to see the correct decrypted data.
	 */
	f2fs_wait_on_block_writeback(inode, block_nr);

	if (bio_add_page(bio, page, blocksize, 0) < blocksize)
		goto submit_and_realloc;

	inc_page_count(F2FS_I_SB(inode), F2FS_RD_DATA);
	f2fs_update_iostat(F2FS_I_SB(inode), FS_DATA_READ_IO, F2FS_BLKSIZE);
	ClearPageError(page);
	*last_block_in_bio = block_nr;
	goto out;
confused:
	if (bio) {
		__submit_bio(F2FS_I_SB(inode), bio, DATA);
		bio = NULL;
	}
	unlock_page(page);
out:
	*bio_ret = bio;
	return ret;
}

#ifdef CONFIG_F2FS_FS_COMPRESSION
int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
				unsigned nr_pages, sector_t *last_block_in_bio,
				bool is_readahead, bool for_write)
{
	struct dnode_of_data dn;
	struct inode *inode = cc->inode;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct bio *bio = *bio_ret;
	unsigned int start_idx = cc->cluster_idx << cc->log_cluster_size;
	sector_t last_block_in_file;
	const unsigned blocksize = blks_to_bytes(inode, 1);
	struct decompress_io_ctx *dic = NULL;
	struct extent_info ei = {0, };
	bool from_dnode = true;
	int i;
	int ret = 0;

	f2fs_bug_on(sbi, f2fs_cluster_is_empty(cc));

	last_block_in_file = bytes_to_blks(inode,
			f2fs_readpage_limit(inode) + blocksize - 1);

	/* get rid of pages beyond EOF */
	for (i = 0; i < cc->cluster_size; i++) {
		struct page *page = cc->rpages[i];

		if (!page)
			continue;
		if ((sector_t)page->index >= last_block_in_file) {
			zero_user_segment(page, 0, PAGE_SIZE);
			if (!PageUptodate(page))
				SetPageUptodate(page);
		} else if (!PageUptodate(page)) {
			continue;
		}
		unlock_page(page);
		if (for_write)
			put_page(page);
		cc->rpages[i] = NULL;
		cc->nr_rpages--;
	}

	/* we are done since all pages are beyond EOF */
	if (f2fs_cluster_is_empty(cc))
		goto out;

	if (f2fs_lookup_extent_cache(inode, start_idx, &ei))
		from_dnode = false;

	if (!from_dnode)
		goto skip_reading_dnode;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
	if (ret)
		goto out;

	f2fs_bug_on(sbi, dn.data_blkaddr != COMPRESS_ADDR);

skip_reading_dnode:
	for (i = 1; i < cc->cluster_size; i++) {
		block_t blkaddr;

		blkaddr = from_dnode ? data_blkaddr(dn.inode, dn.node_page,
					dn.ofs_in_node + i) :
					ei.blk + i - 1;

		if (!__is_valid_data_blkaddr(blkaddr))
			break;

		if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC)) {
			ret = -EFAULT;
			goto out_put_dnode;
		}
		cc->nr_cpages++;

		if (!from_dnode && i >= ei.c_len)
			break;
	}

	/* nothing to decompress */
	if (cc->nr_cpages == 0) {
		ret = 0;
		goto out_put_dnode;
	}

	dic = f2fs_alloc_dic(cc);
	if (IS_ERR(dic)) {
		ret = PTR_ERR(dic);
		goto out_put_dnode;
	}

	for (i = 0; i < cc->nr_cpages; i++) {
		struct page *page = dic->cpages[i];
		block_t blkaddr;
		struct bio_post_read_ctx *ctx;

		blkaddr = from_dnode ? data_blkaddr(dn.inode, dn.node_page,
					dn.ofs_in_node + i + 1) :
					ei.blk + i;

		f2fs_wait_on_block_writeback(inode, blkaddr);

		if (f2fs_load_compressed_page(sbi, page, blkaddr)) {
			if (atomic_dec_and_test(&dic->remaining_pages))
				f2fs_decompress_cluster(dic);
			continue;
		}

		if (bio && (!page_is_mergeable(sbi, bio,
					*last_block_in_bio, blkaddr) ||
		    !f2fs_crypt_mergeable_bio(bio, inode, page->index, NULL))) {
submit_and_realloc:
			__submit_bio(sbi, bio, DATA);
			bio = NULL;
		}

		if (!bio) {
			bio = f2fs_grab_read_bio(inode, blkaddr, nr_pages,
					is_readahead ? REQ_RAHEAD : 0,
					page->index, for_write);
			if (IS_ERR(bio)) {
				ret = PTR_ERR(bio);
				f2fs_decompress_end_io(dic, ret);
				f2fs_put_dnode(&dn);
				*bio_ret = NULL;
				return ret;
			}
		}

		if (bio_add_page(bio, page, blocksize, 0) < blocksize)
			goto submit_and_realloc;

		ctx = get_post_read_ctx(bio);
		ctx->enabled_steps |= STEP_DECOMPRESS;
		refcount_inc(&dic->refcnt);

		inc_page_count(sbi, F2FS_RD_DATA);
		f2fs_update_iostat(sbi, FS_DATA_READ_IO, F2FS_BLKSIZE);
		f2fs_update_iostat(sbi, FS_CDATA_READ_IO, F2FS_BLKSIZE);
		ClearPageError(page);
		*last_block_in_bio = blkaddr;
	}

	if (from_dnode)
		f2fs_put_dnode(&dn);

	*bio_ret = bio;
	return 0;

out_put_dnode:
	if (from_dnode)
		f2fs_put_dnode(&dn);
out:
	for (i = 0; i < cc->cluster_size; i++) {
		if (cc->rpages[i]) {
			ClearPageUptodate(cc->rpages[i]);
			ClearPageError(cc->rpages[i]);
			unlock_page(cc->rpages[i]);
		}
	}
	*bio_ret = bio;
	return ret;
}
#endif

/*
 * This function was originally taken from fs/mpage.c, and customized for f2fs.
 * Major change was from block_size == page_size in f2fs by default.
 */
static int f2fs_mpage_readpages(struct inode *inode,
		struct readahead_control *rac, struct page *page)
{
	struct bio *bio = NULL;
	sector_t last_block_in_bio = 0;
	struct f2fs_map_blocks map;
#ifdef CONFIG_F2FS_FS_COMPRESSION
	struct compress_ctx cc = {
		.inode = inode,
		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
		.cluster_size = F2FS_I(inode)->i_cluster_size,
		.cluster_idx = NULL_CLUSTER,
		.rpages = NULL,
		.cpages = NULL,
		.nr_rpages = 0,
		.nr_cpages = 0,
	};
	pgoff_t nc_cluster_idx = NULL_CLUSTER;
#endif
	unsigned nr_pages = rac ? readahead_count(rac) : 1;
	unsigned max_nr_pages = nr_pages;
	int ret = 0;

	map.m_pblk = 0;
	map.m_lblk = 0;
	map.m_len = 0;
	map.m_flags = 0;
	map.m_next_pgofs = NULL;
	map.m_next_extent = NULL;
	map.m_seg_type = NO_CHECK_TYPE;
	map.m_may_create = false;

	for (; nr_pages; nr_pages--) {
		if (rac) {
			page = readahead_page(rac);
			prefetchw(&page->flags);
		}

#ifdef CONFIG_F2FS_FS_COMPRESSION
		if (f2fs_compressed_file(inode)) {
			/* there are remaining compressed pages, submit them */
			if (!f2fs_cluster_can_merge_page(&cc, page->index)) {
				ret = f2fs_read_multi_pages(&cc, &bio,
							max_nr_pages,
							&last_block_in_bio,
							rac != NULL, false);
				f2fs_destroy_compress_ctx(&cc, false);
				if (ret)
					goto set_error_page;
			}
			if (cc.cluster_idx == NULL_CLUSTER) {
				if (nc_cluster_idx ==
					page->index >> cc.log_cluster_size) {
					goto read_single_page;
				}

				ret = f2fs_is_compressed_cluster(inode, page->index);
				if (ret < 0)
					goto set_error_page;
				else if (!ret) {
					nc_cluster_idx =
						page->index >> cc.log_cluster_size;
					goto read_single_page;
				}

				nc_cluster_idx = NULL_CLUSTER;
			}
			ret = f2fs_init_compress_ctx(&cc);
			if (ret)
				goto set_error_page;

			f2fs_compress_ctx_add_page(&cc, page);

			goto next_page;
		}
read_single_page:
#endif

		ret = f2fs_read_single_page(inode, page, max_nr_pages, &map,
					&bio, &last_block_in_bio, rac);
		if (ret) {
#ifdef CONFIG_F2FS_FS_COMPRESSION
set_error_page:
#endif
			SetPageError(page);
			zero_user_segment(page, 0, PAGE_SIZE);
			unlock_page(page);
		}
#ifdef CONFIG_F2FS_FS_COMPRESSION
next_page:
#endif
		if (rac)
			put_page(page);

#ifdef CONFIG_F2FS_FS_COMPRESSION
		if (f2fs_compressed_file(inode)) {
			/* last page */
			if (nr_pages == 1 && !f2fs_cluster_is_empty(&cc)) {
				ret = f2fs_read_multi_pages(&cc, &bio,
							max_nr_pages,
							&last_block_in_bio,
							rac != NULL, false);
				f2fs_destroy_compress_ctx(&cc, false);
			}
		}
#endif
	}
	if (bio)
		__submit_bio(F2FS_I_SB(inode), bio, DATA);
	return ret;
}
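
/* ->readpage callback: serve inline data directly, else go through mpage. */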
static int f2fs_read_data_page(struct file *file, struct page *page)
{
	struct inode *inode = page_file_mapping(page)->host;
	int ret = -EAGAIN;

	trace_f2fs_readpage(page, DATA);

	if (!f2fs_is_compress_backend_ready(inode)) {
		unlock_page(page);
		return -EOPNOTSUPP;
	}

	/* If the file has inline data, try to read it directly */
	if (f2fs_has_inline_data(inode))
		ret = f2fs_read_inline_data(inode, page);
	if (ret == -EAGAIN)
		ret = f2fs_mpage_readpages(inode, NULL, page);
	return ret;
}

static void f2fs_readahead(struct readahead_control *rac)
{
	struct inode *inode = rac->mapping->host;

	trace_f2fs_readpages(inode, readahead_index(rac), readahead_count(rac));

	if (!f2fs_is_compress_backend_ready(inode))
		return;

	/* If the file has inline data, skip readpages */
	if (f2fs_has_inline_data(inode))
		return;

	f2fs_mpage_readpages(inode, rac, NULL);
}
int f2fs_encrypt_one_page(struct f2fs_io_info *fio)
{
	struct inode *inode = fio->page->mapping->host;
	struct page *mpage, *page;
	gfp_t gfp_flags = GFP_NOFS;

	if (!f2fs_encrypted_file(inode))
		return 0;

	page = fio->compressed_page ? fio->compressed_page : fio->page;

	/* wait for GCed page writeback via META_MAPPING */
	f2fs_wait_on_block_writeback(inode, fio->old_blkaddr);

	if (fscrypt_inode_uses_inline_crypto(inode))
		return 0;

retry_encrypt:
	fio->encrypted_page = fscrypt_encrypt_pagecache_blocks(page,
					PAGE_SIZE, 0, gfp_flags);
	if (IS_ERR(fio->encrypted_page)) {
		/* flush pending IOs and wait for a while in the ENOMEM case */
		if (PTR_ERR(fio->encrypted_page) == -ENOMEM) {
			f2fs_flush_merged_writes(fio->sbi);
			congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
			gfp_flags |= __GFP_NOFAIL;
			goto retry_encrypt;
		}
		return PTR_ERR(fio->encrypted_page);
	}

	mpage = find_lock_page(META_MAPPING(fio->sbi), fio->old_blkaddr);
	if (mpage) {
		if (PageUptodate(mpage))
			memcpy(page_address(mpage),
				page_address(fio->encrypted_page), PAGE_SIZE);
		f2fs_put_page(mpage, 1);
	}
	return 0;
}
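
/*
 * ipu_policy is a bitmask of F2FS_IPU_* bit indexes, so several
 * triggers can be armed at once. For example, a policy of
 *
 *	(0x1 << F2FS_IPU_SSR) | (0x1 << F2FS_IPU_FSYNC)
 *
 * takes the in-place path either when SSR allocation is needed or for
 * pages flagged by fdatasync (FI_NEED_IPU).
 */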
static inline bool check_inplace_update_policy(struct inode *inode,
				struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	unsigned int policy = SM_I(sbi)->ipu_policy;

	if (policy & (0x1 << F2FS_IPU_FORCE))
		return true;
	if (policy & (0x1 << F2FS_IPU_SSR) && f2fs_need_SSR(sbi))
		return true;
	if (policy & (0x1 << F2FS_IPU_UTIL) &&
			utilization(sbi) > SM_I(sbi)->min_ipu_util)
		return true;
	if (policy & (0x1 << F2FS_IPU_SSR_UTIL) && f2fs_need_SSR(sbi) &&
			utilization(sbi) > SM_I(sbi)->min_ipu_util)
		return true;

	/*
	 * IPU for rewrite async pages
	 */
	if (policy & (0x1 << F2FS_IPU_ASYNC) &&
			fio && fio->op == REQ_OP_WRITE &&
			!(fio->op_flags & REQ_SYNC) &&
			!IS_ENCRYPTED(inode))
		return true;

	/* this is only set during fdatasync */
	if (policy & (0x1 << F2FS_IPU_FSYNC) &&
			is_inode_flag_set(inode, FI_NEED_IPU))
		return true;

	if (unlikely(fio && is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
			!f2fs_is_checkpointed_data(sbi, fio->old_blkaddr)))
		return true;

	return false;
}
bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio)
{
	/* swap file is migrating in aligned write mode */
	if (is_inode_flag_set(inode, FI_ALIGNED_WRITE))
		return false;

	if (f2fs_is_pinned_file(inode))
		return true;

	/* if this is cold file, we should overwrite to avoid fragmentation */
	if (file_is_cold(inode))
		return true;

	return check_inplace_update_policy(inode, fio);
}
bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	/* The below cases were checked when setting it. */
	if (f2fs_is_pinned_file(inode))
		return false;
	if (fio && is_sbi_flag_set(sbi, SBI_NEED_FSCK))
		return true;
	if (f2fs_lfs_mode(sbi))
		return true;
	if (S_ISDIR(inode->i_mode))
		return true;
	if (IS_NOQUOTA(inode))
		return true;
	if (f2fs_is_atomic_file(inode))
		return true;

	/* swap file is migrating in aligned write mode */
	if (is_inode_flag_set(inode, FI_ALIGNED_WRITE))
		return true;

	if (fio) {
		if (page_private_gcing(fio->page))
			return true;
		if (page_private_dummy(fio->page))
			return true;
		if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
			f2fs_is_checkpointed_data(sbi, fio->old_blkaddr)))
			return true;
	}
	return false;
}
static inline bool need_inplace_update(struct f2fs_io_info *fio)
{
	struct inode *inode = fio->page->mapping->host;

	if (f2fs_should_update_outplace(inode, fio))
		return false;

	return f2fs_should_update_inplace(inode, fio);
}
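
/*
 * Write one data page either in place (IPU) or to a newly allocated
 * block (OPU). A hit in the extent cache with a known-valid old block
 * address lets the IPU path skip the dnode lookup entirely; otherwise
 * the dnode is consulted and the IPU/OPU decision is made below.
 */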
int f2fs_do_write_data_page(struct f2fs_io_info *fio)
{
	struct page *page = fio->page;
	struct inode *inode = page->mapping->host;
	struct dnode_of_data dn;
	struct extent_info ei = {0, };
	struct node_info ni;
	bool ipu_force = false;
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	if (need_inplace_update(fio) &&
			f2fs_lookup_extent_cache(inode, page->index, &ei)) {
		fio->old_blkaddr = ei.blk + page->index - ei.fofs;

		if (!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
						DATA_GENERIC_ENHANCE))
			return -EFSCORRUPTED;

		ipu_force = true;
		fio->need_lock = LOCK_DONE;
		goto got_it;
	}

	/* Deadlock due to between page->lock and f2fs_lock_op */
	if (fio->need_lock == LOCK_REQ && !f2fs_trylock_op(fio->sbi))
		return -EAGAIN;

	err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
	if (err)
		goto out;

	fio->old_blkaddr = dn.data_blkaddr;

	/* This page is already truncated */
	if (fio->old_blkaddr == NULL_ADDR) {
		ClearPageUptodate(page);
		clear_page_private_gcing(page);
		goto out_writepage;
	}
got_it:
	if (__is_valid_data_blkaddr(fio->old_blkaddr) &&
		!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
						DATA_GENERIC_ENHANCE)) {
		err = -EFSCORRUPTED;
		goto out_writepage;
	}
	/*
	 * If the current allocation needs SSR, it is better to do in-place
	 * writes for the updated data.
	 */
	if (ipu_force ||
		(__is_valid_data_blkaddr(fio->old_blkaddr) &&
					need_inplace_update(fio))) {
		err = f2fs_encrypt_one_page(fio);
		if (err)
			goto out_writepage;

		set_page_writeback(page);
		ClearPageError(page);
		f2fs_put_dnode(&dn);
		if (fio->need_lock == LOCK_REQ)
			f2fs_unlock_op(fio->sbi);
		err = f2fs_inplace_write_data(fio);
		if (err) {
			if (fscrypt_inode_uses_fs_layer_crypto(inode))
				fscrypt_finalize_bounce_page(
						&fio->encrypted_page);
			if (PageWriteback(page))
				end_page_writeback(page);
		} else {
			set_inode_flag(inode, FI_UPDATE_WRITE);
		}
		trace_f2fs_do_write_data_page(fio->page, IPU);
		return err;
	}

	if (fio->need_lock == LOCK_RETRY) {
		if (!f2fs_trylock_op(fio->sbi)) {
			err = -EAGAIN;
			goto out_writepage;
		}
		fio->need_lock = LOCK_REQ;
	}

	err = f2fs_get_node_info(fio->sbi, dn.nid, &ni);
	if (err)
		goto out_writepage;

	fio->version = ni.version;

	err = f2fs_encrypt_one_page(fio);
	if (err)
		goto out_writepage;

	set_page_writeback(page);
	ClearPageError(page);

	if (fio->compr_blocks && fio->old_blkaddr == COMPRESS_ADDR)
		f2fs_i_compr_blocks_update(inode, fio->compr_blocks - 1, false);

	/* LFS mode write path */
	f2fs_outplace_write_data(&dn, fio);
	trace_f2fs_do_write_data_page(page, OPU);
	set_inode_flag(inode, FI_APPEND_WRITE);
	if (page->index == 0)
		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
out_writepage:
	f2fs_put_dnode(&dn);
out:
	if (fio->need_lock == LOCK_REQ)
		f2fs_unlock_op(fio->sbi);
	return err;
}
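
/*
 * Common worker for ->writepage() and the cache-walking writeback
 * loop below. Handles checkpoint errors, tail-page zeroing beyond
 * i_size, the inline-data fast path, and the EAGAIN retry dance
 * around f2fs_do_write_data_page() before balancing dirty state.
 */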
int f2fs_write_single_data_page(struct page *page, int *submitted,
				struct bio **bio,
				sector_t *last_block,
				struct writeback_control *wbc,
				enum iostat_type io_type,
				int compr_blocks,
				bool allow_balance)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long) i_size)
							>> PAGE_SHIFT;
	loff_t psize = (loff_t)(page->index + 1) << PAGE_SHIFT;
	unsigned offset = 0;
	bool need_balance_fs = false;
	int err = 0;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = inode->i_ino,
		.type = DATA,
		.op = REQ_OP_WRITE,
		.op_flags = wbc_to_write_flags(wbc),
		.old_blkaddr = NULL_ADDR,
		.page = page,
		.encrypted_page = NULL,
		.submitted = false,
		.compr_blocks = compr_blocks,
		.need_lock = LOCK_RETRY,
		.io_type = io_type,
		.io_wbc = wbc,
		.bio = bio,
		.last_block = last_block,
	};

	trace_f2fs_writepage(page, DATA);

	/* we should bypass data pages to let the kworker jobs proceed */
	if (unlikely(f2fs_cp_error(sbi))) {
		mapping_set_error(page->mapping, -EIO);
		/*
		 * don't drop any dirty dentry pages for keeping latest
		 * directory structure.
		 */
		if (S_ISDIR(inode->i_mode))
			goto redirty_out;
		goto out;
	}

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;

	if (page->index < end_index ||
			f2fs_verity_in_progress(inode) ||
			compr_blocks)
		goto write;

	/*
	 * If the offset is out-of-range of file size,
	 * this page does not have to be written to disk.
	 */
	offset = i_size & (PAGE_SIZE - 1);
	if ((page->index >= end_index + 1) || !offset)
		goto out;

	zero_user_segment(page, offset, PAGE_SIZE);
write:
	if (f2fs_is_drop_cache(inode))
		goto out;
	/* we should not write 0'th page having journal header */
	if (f2fs_is_volatile_file(inode) && (!page->index ||
			(!wbc->for_reclaim &&
			f2fs_available_free_memory(sbi, BASE_CHECK))))
		goto redirty_out;

	/* Dentry/quota blocks are controlled by checkpoint */
	if (S_ISDIR(inode->i_mode) || IS_NOQUOTA(inode)) {
		/*
		 * We need to wait for node_write to avoid block allocation
		 * during checkpoint. This can only happen to quota writes
		 * which can cause the below discard race condition.
		 */
		if (IS_NOQUOTA(inode))
			down_read(&sbi->node_write);

		fio.need_lock = LOCK_DONE;
		err = f2fs_do_write_data_page(&fio);

		if (IS_NOQUOTA(inode))
			up_read(&sbi->node_write);

		goto done;
	}

	if (!wbc->for_reclaim)
		need_balance_fs = true;
	else if (has_not_enough_free_secs(sbi, 0, 0))
		goto redirty_out;
	else
		set_inode_flag(inode, FI_HOT_DATA);

	err = -EAGAIN;
	if (f2fs_has_inline_data(inode)) {
		err = f2fs_write_inline_data(inode, page);
		if (!err)
			goto out;
	}

	if (err == -EAGAIN) {
		err = f2fs_do_write_data_page(&fio);
		if (err == -EAGAIN) {
			fio.need_lock = LOCK_REQ;
			err = f2fs_do_write_data_page(&fio);
		}
	}

	if (err) {
		file_set_keep_isize(inode);
	} else {
		spin_lock(&F2FS_I(inode)->i_size_lock);
		if (F2FS_I(inode)->last_disk_size < psize)
			F2FS_I(inode)->last_disk_size = psize;
		spin_unlock(&F2FS_I(inode)->i_size_lock);
	}

done:
	if (err && err != -ENOENT)
		goto redirty_out;

out:
	inode_dec_dirty_pages(inode);
	if (err) {
		ClearPageUptodate(page);
		clear_page_private_gcing(page);
	}

	if (wbc->for_reclaim) {
		f2fs_submit_merged_write_cond(sbi, NULL, page, 0, DATA);
		clear_inode_flag(inode, FI_HOT_DATA);
		f2fs_remove_dirty_inode(inode);
		submitted = NULL;
	}
	unlock_page(page);
	if (!S_ISDIR(inode->i_mode) && !IS_NOQUOTA(inode) &&
			!F2FS_I(inode)->cp_task && allow_balance)
		f2fs_balance_fs(sbi, need_balance_fs);

	if (unlikely(f2fs_cp_error(sbi))) {
		f2fs_submit_merged_write(sbi, DATA);
		f2fs_submit_merged_ipu_write(sbi, bio, NULL);
		submitted = NULL;
	}

	if (submitted)
		*submitted = fio.submitted ? 1 : 0;

	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	/*
	 * pageout() in MM translates EAGAIN, so calls handle_write_error()
	 * -> mapping_set_error() -> set_bit(AS_EIO, ...).
	 * file_write_and_wait_range() will see EIO error, which is critical
	 * to return value of fsync() followed by atomic_write failure to user.
	 */
	if (!err || wbc->for_reclaim)
		return AOP_WRITEPAGE_ACTIVATE;
	unlock_page(page);
	return err;
}
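
/*
 * ->writepage() entry. A page that belongs to a compressed cluster
 * cannot be written alone, so it is redirtied here and left for
 * ->writepages() to collect together with the rest of its cluster.
 */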
static int f2fs_write_data_page(struct page *page,
					struct writeback_control *wbc)
{
#ifdef CONFIG_F2FS_FS_COMPRESSION
	struct inode *inode = page->mapping->host;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		goto out;

	if (f2fs_compressed_file(inode)) {
		if (f2fs_is_compressed_cluster(inode, page->index)) {
			redirty_page_for_writepage(wbc, page);
			return AOP_WRITEPAGE_ACTIVATE;
		}
	}
out:
#endif

	return f2fs_write_single_data_page(page, NULL, NULL, NULL,
						wbc, FS_DATA_IO, 0, true);
}
/*
 * This function was copied from write_cache_pages from mm/page-writeback.c.
 * The major change is making write step of cold data page separately from
 * warm/hot data page.
 */
static int f2fs_write_cache_pages(struct address_space *mapping,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	int ret = 0;
	int done = 0, retry = 0;
	struct pagevec pvec;
	struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
	struct bio *bio = NULL;
	sector_t last_block;
#ifdef CONFIG_F2FS_FS_COMPRESSION
	struct inode *inode = mapping->host;
	struct compress_ctx cc = {
		.inode = inode,
		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
		.cluster_size = F2FS_I(inode)->i_cluster_size,
		.cluster_idx = NULL_CLUSTER,
		.rpages = NULL,
		.nr_rpages = 0,
		.cpages = NULL,
		.rbuf = NULL,
		.cbuf = NULL,
		.rlen = PAGE_SIZE * F2FS_I(inode)->i_cluster_size,
		.private = NULL,
	};
#endif
	int nwritten = 0;
	int submitted = 0;
	int i;
	pgoff_t index;
	pgoff_t end;		/* Inclusive */
	pgoff_t done_index;
	int range_whole = 0;
	xa_mark_t tag;
	int nr_pages;

	pagevec_init(&pvec);

	if (get_dirty_pages(mapping->host) <=
				SM_I(F2FS_M_SB(mapping))->min_hot_blocks)
		set_inode_flag(mapping->host, FI_HOT_DATA);
	else
		clear_inode_flag(mapping->host, FI_HOT_DATA);

	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* prev offset */
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;
retry:
	retry = 0;
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && !retry && (index <= end)) {
		nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
				tag);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			bool need_readd;
readd:
			need_readd = false;
#ifdef CONFIG_F2FS_FS_COMPRESSION
			if (f2fs_compressed_file(inode)) {
				ret = f2fs_init_compress_ctx(&cc);
				if (ret) {
					done = 1;
					break;
				}

				if (!f2fs_cluster_can_merge_page(&cc,
								page->index)) {
					ret = f2fs_write_multi_pages(&cc,
						&submitted, wbc, io_type);
					if (!ret)
						need_readd = true;
					goto result;
				}

				if (unlikely(f2fs_cp_error(sbi)))
					goto lock_page;

				if (f2fs_cluster_is_empty(&cc)) {
					void *fsdata = NULL;
					struct page *pagep;
					int ret2;

					ret2 = f2fs_prepare_compress_overwrite(
							inode, &pagep,
							page->index, &fsdata);
					if (ret2 < 0) {
						ret = ret2;
						done = 1;
						break;
					} else if (ret2 &&
						(!f2fs_compress_write_end(inode,
							fsdata, page->index, 1) ||
						!f2fs_all_cluster_page_loaded(&cc,
							&pvec, i, nr_pages))) {
						retry = 1;
						break;
					}
				} else {
					goto lock_page;
				}
			}
#endif
			/* give a priority to WB_SYNC threads */
			if (atomic_read(&sbi->wb_sync_req[DATA]) &&
					wbc->sync_mode == WB_SYNC_NONE) {
				done = 1;
				break;
			}
#ifdef CONFIG_F2FS_FS_COMPRESSION
lock_page:
#endif
			done_index = page->index;
retry_write:
			lock_page(page);

			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (PageWriteback(page)) {
				if (wbc->sync_mode != WB_SYNC_NONE)
					f2fs_wait_on_page_writeback(page,
							DATA, true, true);
				else
					goto continue_unlock;
			}

			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

#ifdef CONFIG_F2FS_FS_COMPRESSION
			if (f2fs_compressed_file(inode)) {
				get_page(page);
				f2fs_compress_ctx_add_page(&cc, page);
				continue;
			}
#endif
			ret = f2fs_write_single_data_page(page, &submitted,
					&bio, &last_block, wbc, io_type,
					0, true);
			if (ret == AOP_WRITEPAGE_ACTIVATE)
				unlock_page(page);
#ifdef CONFIG_F2FS_FS_COMPRESSION
result:
#endif
			nwritten += submitted;
			wbc->nr_to_write -= submitted;

			if (unlikely(ret)) {
				/*
				 * keep nr_to_write, since vfs uses this to
				 * get # of written pages.
				 */
				if (ret == AOP_WRITEPAGE_ACTIVATE) {
					ret = 0;
					goto next;
				} else if (ret == -EAGAIN) {
					ret = 0;
					if (wbc->sync_mode == WB_SYNC_ALL) {
						cond_resched();
						congestion_wait(BLK_RW_ASYNC,
							DEFAULT_IO_TIMEOUT);
						goto retry_write;
					}
					goto next;
				}
				done_index = page->index + 1;
				done = 1;
				break;
			}

			if (wbc->nr_to_write <= 0 &&
					wbc->sync_mode == WB_SYNC_NONE) {
				done = 1;
				break;
			}
next:
			if (need_readd)
				goto readd;
		}
		pagevec_release(&pvec);
		cond_resched();
	}
#ifdef CONFIG_F2FS_FS_COMPRESSION
	/* flush remained pages in compress cluster */
	if (f2fs_compressed_file(inode) && !f2fs_cluster_is_empty(&cc)) {
		ret = f2fs_write_multi_pages(&cc, &submitted, wbc, io_type);
		nwritten += submitted;
		wbc->nr_to_write -= submitted;
		if (ret) {
			done = 1;
			retry = 0;
		}
	}
	if (f2fs_compressed_file(inode))
		f2fs_destroy_compress_ctx(&cc, false);
#endif
	if (retry) {
		index = 0;
		end = -1;
		goto retry;
	}
	if (wbc->range_cyclic && !done)
		done_index = 0;
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	if (nwritten)
		f2fs_submit_merged_write_cond(F2FS_M_SB(mapping), mapping->host,
								NULL, 0, DATA);
	/* submit cached bio of IPU write */
	if (bio)
		f2fs_submit_merged_ipu_write(sbi, &bio, NULL);

	return ret;
}
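
/*
 * Funnel certain writebacks through sbi->writepages so that
 * concurrent flushers do not interleave their blocks on disk:
 * compressed files, background (non-WB_SYNC_ALL) writeback, and
 * inodes with at least min_seq_blocks dirty pages.
 */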
static inline bool __should_serialize_io(struct inode *inode,
					struct writeback_control *wbc)
{
	/* to avoid deadlock in path of data flush */
	if (F2FS_I(inode)->cp_task)
		return false;

	if (!S_ISREG(inode->i_mode))
		return false;
	if (IS_NOQUOTA(inode))
		return false;

	if (f2fs_need_compress_data(inode))
		return true;
	if (wbc->sync_mode != WB_SYNC_ALL)
		return true;
	if (get_dirty_pages(inode) >= SM_I(F2FS_I_SB(inode))->min_seq_blocks)
		return true;
	return false;
}
static int __f2fs_write_data_pages(struct address_space *mapping,
						struct writeback_control *wbc,
						enum iostat_type io_type)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct blk_plug plug;
	int ret;
	bool locked = false;

	/* deal with chardevs and other special file */
	if (!mapping->a_ops->writepage)
		return 0;

	/* skip writing if there is no dirty page in this inode */
	if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
		return 0;

	/* during POR, we don't need to trigger writepage at all. */
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto skip_write;

	if ((S_ISDIR(inode->i_mode) || IS_NOQUOTA(inode)) &&
			wbc->sync_mode == WB_SYNC_NONE &&
			get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
			f2fs_available_free_memory(sbi, DIRTY_DENTS))
		goto skip_write;

	/* skip writing during file defragment */
	if (is_inode_flag_set(inode, FI_DO_DEFRAG))
		goto skip_write;

	trace_f2fs_writepages(mapping->host, wbc, DATA);

	/* to avoid splitting IOs due to mixed WB_SYNC_ALL and WB_SYNC_NONE */
	if (wbc->sync_mode == WB_SYNC_ALL)
		atomic_inc(&sbi->wb_sync_req[DATA]);
	else if (atomic_read(&sbi->wb_sync_req[DATA])) {
		/* to avoid potential deadlock */
		if (current->plug)
			blk_finish_plug(current->plug);
		goto skip_write;
	}

	if (__should_serialize_io(inode, wbc)) {
		mutex_lock(&sbi->writepages);
		locked = true;
	}

	blk_start_plug(&plug);
	ret = f2fs_write_cache_pages(mapping, wbc, io_type);
	blk_finish_plug(&plug);

	if (locked)
		mutex_unlock(&sbi->writepages);

	if (wbc->sync_mode == WB_SYNC_ALL)
		atomic_dec(&sbi->wb_sync_req[DATA]);
	/*
	 * if some pages were truncated, we cannot guarantee its mapping->host
	 * to detect pending bios.
	 */

	f2fs_remove_dirty_inode(inode);
	return ret;

skip_write:
	wbc->pages_skipped += get_dirty_pages(inode);
	trace_f2fs_writepages(mapping->host, wbc, DATA);
	return 0;
}
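
/*
 * ->writepages() entry. Writeback issued by the checkpoint task is
 * accounted as FS_CP_DATA_IO so that iostat can tell checkpoint
 * traffic apart from ordinary data flushes.
 */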
static int f2fs_write_data_pages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;

	return __f2fs_write_data_pages(mapping, wbc,
			F2FS_I(inode)->cp_task == current ?
			FS_CP_DATA_IO : FS_DATA_IO);
}
static void f2fs_write_failed(struct inode *inode, loff_t to)
{
	loff_t i_size = i_size_read(inode);

	if (IS_NOQUOTA(inode))
		return;

	/* In the fs-verity case, f2fs_end_enable_verity() does the truncate */
	if (to > i_size && !f2fs_verity_in_progress(inode)) {
		down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		filemap_invalidate_lock(inode->i_mapping);

		truncate_pagecache(inode, i_size);
		f2fs_truncate_blocks(inode, i_size, true);

		filemap_invalidate_unlock(inode->i_mapping);
		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	}
}
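
/*
 * Resolve the block address backing the page that write_begin is about
 * to hand out: serve or convert inline data, probe the extent cache,
 * and fall back to allocating via f2fs_get_block() under
 * f2fs_do_map_lock() when the write lands in a hole.
 */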
static int prepare_write_begin(struct f2fs_sb_info *sbi,
			struct page *page, loff_t pos, unsigned len,
			block_t *blk_addr, bool *node_changed)
{
	struct inode *inode = page->mapping->host;
	pgoff_t index = page->index;
	struct dnode_of_data dn;
	struct page *ipage;
	bool locked = false;
	struct extent_info ei = {0, };
	int err = 0;
	int flag;

	/*
	 * we already allocated all the blocks, so we don't need to get
	 * the block addresses when there is no need to fill the page.
	 */
	if (!f2fs_has_inline_data(inode) && len == PAGE_SIZE &&
	    !is_inode_flag_set(inode, FI_NO_PREALLOC) &&
	    !f2fs_verity_in_progress(inode))
		return 0;

	/* f2fs_lock_op avoids race between write CP and convert_inline_page */
	if (f2fs_has_inline_data(inode) && pos + len > MAX_INLINE_DATA(inode))
		flag = F2FS_GET_BLOCK_DEFAULT;
	else
		flag = F2FS_GET_BLOCK_PRE_AIO;

	if (f2fs_has_inline_data(inode) ||
			(pos & PAGE_MASK) >= i_size_read(inode)) {
		f2fs_do_map_lock(sbi, flag, true);
		locked = true;
	}

restart:
	/* check inline_data */
	ipage = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto unlock_out;
	}

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_has_inline_data(inode)) {
		if (pos + len <= MAX_INLINE_DATA(inode)) {
			f2fs_do_read_inline_data(page, ipage);
			set_inode_flag(inode, FI_DATA_EXIST);
			if (inode->i_nlink)
				set_page_private_inline(ipage);
		} else {
			err = f2fs_convert_inline_page(&dn, page);
			if (err)
				goto out;
			if (dn.data_blkaddr == NULL_ADDR)
				err = f2fs_get_block(&dn, index);
		}
	} else if (locked) {
		err = f2fs_get_block(&dn, index);
	} else {
		if (f2fs_lookup_extent_cache(inode, index, &ei)) {
			dn.data_blkaddr = ei.blk + index - ei.fofs;
		} else {
			/* hole case */
			err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
			if (err || dn.data_blkaddr == NULL_ADDR) {
				f2fs_put_dnode(&dn);
				f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO,
								true);
				WARN_ON(flag != F2FS_GET_BLOCK_PRE_AIO);
				locked = true;
				goto restart;
			}
		}
	}

	/* convert_inline_page can make node_changed */
	*blk_addr = dn.data_blkaddr;
	*node_changed = dn.node_changed;
out:
	f2fs_put_dnode(&dn);
unlock_out:
	if (locked)
		f2fs_do_map_lock(sbi, flag, false);
	return err;
}
static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page = NULL;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_SHIFT;
	bool need_balance = false, drop_atomic = false;
	block_t blkaddr = NULL_ADDR;
	int err = 0;

	trace_f2fs_write_begin(inode, pos, len, flags);

	if (!f2fs_is_checkpoint_ready(sbi)) {
		err = -ENOSPC;
		goto fail;
	}

	if ((f2fs_is_atomic_file(inode) &&
			!f2fs_available_free_memory(sbi, INMEM_PAGES)) ||
			is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
		err = -ENOMEM;
		drop_atomic = true;
		goto fail;
	}

	/*
	 * We should check this at this moment to avoid deadlock on inode page
	 * and #0 page. The locking rule for inline_data conversion should be:
	 * lock_page(page #0) -> lock_page(inode_page)
	 */
	if (index != 0) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			goto fail;
	}

#ifdef CONFIG_F2FS_FS_COMPRESSION
	if (f2fs_compressed_file(inode)) {
		int ret;

		*fsdata = NULL;

		if (len == PAGE_SIZE)
			goto repeat;

		ret = f2fs_prepare_compress_overwrite(inode, pagep,
							index, fsdata);
		if (ret < 0) {
			err = ret;
			goto fail;
		} else if (ret) {
			return 0;
		}
	}
#endif

repeat:
	/*
	 * Do not use grab_cache_page_write_begin() to avoid deadlock due to
	 * wait_for_stable_page. Will wait that below with our IO control.
	 */
	page = f2fs_pagecache_get_page(mapping, index,
				FGP_LOCK | FGP_WRITE | FGP_CREAT, GFP_NOFS);
	if (!page) {
		err = -ENOMEM;
		goto fail;
	}

	/* TODO: cluster can be compressed due to race with .writepage */

	*pagep = page;

	err = prepare_write_begin(sbi, page, pos, len,
					&blkaddr, &need_balance);
	if (err)
		goto fail;

	if (need_balance && !IS_NOQUOTA(inode) &&
			has_not_enough_free_secs(sbi, 0, 0)) {
		unlock_page(page);
		f2fs_balance_fs(sbi, true);
		lock_page(page);
		if (page->mapping != mapping) {
			/* The page got truncated from under us */
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}

	f2fs_wait_on_page_writeback(page, DATA, false, true);

	if (len == PAGE_SIZE || PageUptodate(page))
		return 0;

	if (!(pos & (PAGE_SIZE - 1)) && (pos + len) >= i_size_read(inode) &&
	    !f2fs_verity_in_progress(inode)) {
		zero_user_segment(page, len, PAGE_SIZE);
		return 0;
	}

	if (blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
	} else {
		if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
				DATA_GENERIC_ENHANCE_READ)) {
			err = -EFSCORRUPTED;
			goto fail;
		}
		err = f2fs_submit_page_read(inode, page, blkaddr, 0, true);
		if (err)
			goto fail;

		lock_page(page);
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
		if (unlikely(!PageUptodate(page))) {
			err = -EIO;
			goto fail;
		}
	}
	return 0;

fail:
	f2fs_put_page(page, 1);
	f2fs_write_failed(inode, pos + len);
	if (drop_atomic)
		f2fs_drop_inmem_pages_all(sbi, false);
	return err;
}
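
/*
 * ->write_end() counterpart: commit the copied bytes and extend i_size
 * when the write went past EOF, except while fs-verity is being
 * enabled, where f2fs_end_enable_verity() handles the size instead.
 */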
static int f2fs_write_end(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	trace_f2fs_write_end(inode, pos, len, copied);

	/*
	 * This should come from len == PAGE_SIZE, and we expect copied
	 * should be PAGE_SIZE. Otherwise, we treat it with zero copied and
	 * let generic_perform_write() try to copy data again through copied=0.
	 */
	if (!PageUptodate(page)) {
		if (unlikely(copied != len))
			copied = 0;
		else
			SetPageUptodate(page);
	}

#ifdef CONFIG_F2FS_FS_COMPRESSION
	/* overwrite compressed file */
	if (f2fs_compressed_file(inode) && fsdata) {
		f2fs_compress_write_end(inode, fsdata, page->index, copied);
		f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);

		if (pos + copied > i_size_read(inode) &&
				!f2fs_verity_in_progress(inode))
			f2fs_i_size_write(inode, pos + copied);
		return copied;
	}
#endif

	if (!copied)
		goto unlock_out;

	set_page_dirty(page);

	if (pos + copied > i_size_read(inode) &&
	    !f2fs_verity_in_progress(inode))
		f2fs_i_size_write(inode, pos + copied);
unlock_out:
	f2fs_put_page(page, 1);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return copied;
}
static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
			   loff_t offset)
{
	unsigned i_blkbits = READ_ONCE(inode->i_blkbits);
	unsigned blkbits = i_blkbits;
	unsigned blocksize_mask = (1 << blkbits) - 1;
	unsigned long align = offset | iov_iter_alignment(iter);
	struct block_device *bdev = inode->i_sb->s_bdev;

	if (iov_iter_rw(iter) == READ && offset >= i_size_read(inode))
		return 1;

	if (align & blocksize_mask) {
		if (bdev)
			blkbits = blksize_bits(bdev_logical_block_size(bdev));
		blocksize_mask = (1 << blkbits) - 1;
		if (align & blocksize_mask)
			return -EINVAL;
		return 1;
	}
	return 0;
}
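
/*
 * Private completion hook for direct I/O bios: drops the in-flight
 * F2FS_DIO_READ/WRITE count taken in f2fs_dio_submit_bio() and then
 * restores and invokes the original bi_end_io.
 */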
static void f2fs_dio_end_io(struct bio *bio)
{
	struct f2fs_private_dio *dio = bio->bi_private;

	dec_page_count(F2FS_I_SB(dio->inode),
			dio->write ? F2FS_DIO_WRITE : F2FS_DIO_READ);

	bio->bi_private = dio->orig_private;
	bio->bi_end_io = dio->orig_end_io;

	kfree(dio);

	bio_endio(bio);
}
static void f2fs_dio_submit_bio(struct bio *bio, struct inode *inode,
							loff_t file_offset)
{
	struct f2fs_private_dio *dio;
	bool write = (bio_op(bio) == REQ_OP_WRITE);

	dio = f2fs_kzalloc(F2FS_I_SB(inode),
			sizeof(struct f2fs_private_dio), GFP_NOFS);
	if (!dio)
		goto out;

	dio->inode = inode;
	dio->orig_end_io = bio->bi_end_io;
	dio->orig_private = bio->bi_private;
	dio->write = write;

	bio->bi_end_io = f2fs_dio_end_io;
	bio->bi_private = dio;

	inc_page_count(F2FS_I_SB(inode),
			write ? F2FS_DIO_WRITE : F2FS_DIO_READ);

	submit_bio(bio);
	return;
out:
	bio->bi_status = BLK_STS_IOERR;
	bio_endio(bio);
}
static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	size_t count = iov_iter_count(iter);
	loff_t offset = iocb->ki_pos;
	int rw = iov_iter_rw(iter);
	int err;
	enum rw_hint hint = iocb->ki_hint;
	int whint_mode = F2FS_OPTION(sbi).whint_mode;
	bool do_opu;

	err = check_direct_IO(inode, iter, offset);
	if (err)
		return err < 0 ? err : 0;

	if (f2fs_force_buffered_io(inode, iocb, iter))
		return 0;

	do_opu = rw == WRITE && f2fs_lfs_mode(sbi);

	trace_f2fs_direct_IO_enter(inode, offset, count, rw);

	if (rw == WRITE && whint_mode == WHINT_MODE_OFF)
		iocb->ki_hint = WRITE_LIFE_NOT_SET;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!down_read_trylock(&fi->i_gc_rwsem[rw])) {
			iocb->ki_hint = hint;
			err = -EAGAIN;
			goto out;
		}
		if (do_opu && !down_read_trylock(&fi->i_gc_rwsem[READ])) {
			up_read(&fi->i_gc_rwsem[rw]);
			iocb->ki_hint = hint;
			err = -EAGAIN;
			goto out;
		}
	} else {
		down_read(&fi->i_gc_rwsem[rw]);
		if (do_opu)
			down_read(&fi->i_gc_rwsem[READ]);
	}

	err = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
			iter, rw == WRITE ? get_data_block_dio_write :
			get_data_block_dio, NULL, f2fs_dio_submit_bio,
			rw == WRITE ? DIO_LOCKING | DIO_SKIP_HOLES :
			DIO_SKIP_HOLES);

	if (do_opu)
		up_read(&fi->i_gc_rwsem[READ]);

	up_read(&fi->i_gc_rwsem[rw]);

	if (rw == WRITE) {
		if (whint_mode == WHINT_MODE_OFF)
			iocb->ki_hint = hint;
		if (err > 0) {
			f2fs_update_iostat(F2FS_I_SB(inode), APP_DIRECT_IO,
									err);
			set_inode_flag(inode, FI_UPDATE_WRITE);
		} else if (err == -EIOCBQUEUED) {
			f2fs_update_iostat(F2FS_I_SB(inode), APP_DIRECT_IO,
						count - iov_iter_count(iter));
		} else if (err < 0) {
			f2fs_write_failed(inode, offset + count);
		}
	} else {
		if (err > 0)
			f2fs_update_iostat(sbi, APP_DIRECT_READ_IO, err);
		else if (err == -EIOCBQUEUED)
			f2fs_update_iostat(F2FS_I_SB(inode), APP_DIRECT_READ_IO,
						count - iov_iter_count(iter));
	}

out:
	trace_f2fs_direct_IO_exit(inode, offset, count, rw, err);

	return err;
}
void f2fs_invalidate_page(struct page *page, unsigned int offset,
							unsigned int length)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
		(offset % PAGE_SIZE || length != PAGE_SIZE))
		return;

	if (PageDirty(page)) {
		if (inode->i_ino == F2FS_META_INO(sbi)) {
			dec_page_count(sbi, F2FS_DIRTY_META);
		} else if (inode->i_ino == F2FS_NODE_INO(sbi)) {
			dec_page_count(sbi, F2FS_DIRTY_NODES);
		} else {
			inode_dec_dirty_pages(inode);
			f2fs_remove_dirty_inode(inode);
		}
	}

	clear_page_private_gcing(page);

	if (test_opt(sbi, COMPRESS_CACHE)) {
		if (f2fs_compressed_file(inode))
			f2fs_invalidate_compress_pages(sbi, inode->i_ino);
		if (inode->i_ino == F2FS_COMPRESS_INO(sbi))
			clear_page_private_data(page);
	}

	if (page_private_atomic(page))
		return f2fs_drop_inmem_page(inode, page);

	detach_page_private(page);
	set_page_private(page, 0);
}
int f2fs_release_page(struct page *page, gfp_t wait)
{
	/* If this is dirty page, keep PagePrivate */
	if (PageDirty(page))
		return 0;

	/* This is atomic written page, keep Private */
	if (page_private_atomic(page))
		return 0;

	if (test_opt(F2FS_P_SB(page), COMPRESS_CACHE)) {
		struct f2fs_sb_info *sbi = F2FS_P_SB(page);
		struct inode *inode = page->mapping->host;

		if (f2fs_compressed_file(inode))
			f2fs_invalidate_compress_pages(sbi, inode->i_ino);
		if (inode->i_ino == F2FS_COMPRESS_INO(sbi))
			clear_page_private_data(page);
	}

	clear_page_private_gcing(page);

	detach_page_private(page);
	set_page_private(page, 0);
	return 1;
}
static int f2fs_set_data_page_dirty(struct page *page)
{
	struct inode *inode = page_file_mapping(page)->host;

	trace_f2fs_set_page_dirty(page, DATA);

	if (!PageUptodate(page))
		SetPageUptodate(page);
	if (PageSwapCache(page))
		return __set_page_dirty_nobuffers(page);

	if (f2fs_is_atomic_file(inode) && !f2fs_is_commit_atomic_write(inode)) {
		if (!page_private_atomic(page)) {
			f2fs_register_inmem_page(inode, page);
			return 1;
		}
		/*
		 * Previously, this page has been registered, we just
		 * return here.
		 */
		return 0;
	}

	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		f2fs_update_dirty_page(inode, page);
		return 1;
	}
	return 0;
}
static sector_t f2fs_bmap_compress(struct inode *inode, sector_t block)
{
#ifdef CONFIG_F2FS_FS_COMPRESSION
	struct dnode_of_data dn;
	sector_t start_idx, blknr = 0;
	int ret;

	start_idx = round_down(block, F2FS_I(inode)->i_cluster_size);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
	if (ret)
		return 0;

	if (dn.data_blkaddr != COMPRESS_ADDR) {
		dn.ofs_in_node += block - start_idx;
		blknr = f2fs_data_blkaddr(&dn);
		if (!__is_valid_data_blkaddr(blknr))
			blknr = 0;
	}

	f2fs_put_dnode(&dn);
	return blknr;
#else
	return 0;
#endif
}
static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;
	sector_t blknr = 0;

	if (f2fs_has_inline_data(inode))
		goto out;

	/* make sure allocating whole blocks */
	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		filemap_write_and_wait(mapping);

	/* Block number less than F2FS MAX BLOCKS */
	if (unlikely(block >= max_file_blocks(inode)))
		goto out;

	if (f2fs_compressed_file(inode)) {
		blknr = f2fs_bmap_compress(inode, block);
	} else {
		struct f2fs_map_blocks map;

		memset(&map, 0, sizeof(map));
		map.m_lblk = block;
		map.m_len = 1;
		map.m_next_pgofs = NULL;
		map.m_seg_type = NO_CHECK_TYPE;

		if (!f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_BMAP))
			blknr = map.m_pblk;
	}
out:
	trace_f2fs_bmap(inode, block, blknr);
	return blknr;
}
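
/*
 * Page migration support. Atomic-write pages carry an extra reference
 * and live on fi->inmem_pages, so migrating one requires inmem_lock
 * and a fix-up of the list entry to point at the new page.
 */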
#ifdef CONFIG_MIGRATION
#include <linux/migrate.h>

int f2fs_migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page, enum migrate_mode mode)
{
	int rc, extra_count;
	struct f2fs_inode_info *fi = F2FS_I(mapping->host);
	bool atomic_written = page_private_atomic(page);

	BUG_ON(PageWriteback(page));

	/* migrating an atomic written page is safe with the inmem_lock hold */
	if (atomic_written) {
		if (mode != MIGRATE_SYNC)
			return -EBUSY;
		if (!mutex_trylock(&fi->inmem_lock))
			return -EAGAIN;
	}

	/* one extra reference was held for atomic_write page */
	extra_count = atomic_written ? 1 : 0;
	rc = migrate_page_move_mapping(mapping, newpage,
				page, extra_count);
	if (rc != MIGRATEPAGE_SUCCESS) {
		if (atomic_written)
			mutex_unlock(&fi->inmem_lock);
		return rc;
	}

	if (atomic_written) {
		struct inmem_pages *cur;

		list_for_each_entry(cur, &fi->inmem_pages, list)
			if (cur->page == page) {
				cur->page = newpage;
				break;
			}
		mutex_unlock(&fi->inmem_lock);
		put_page(page);
		get_page(newpage);
	}

	/* guarantee to start from no stale private field */
	set_page_private(newpage, 0);
	if (PagePrivate(page)) {
		set_page_private(newpage, page_private(page));
		SetPagePrivate(newpage);
		get_page(newpage);

		set_page_private(page, 0);
		ClearPagePrivate(page);
		put_page(page);
	}

	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);

	return MIGRATEPAGE_SUCCESS;
}
#endif
#ifdef CONFIG_SWAP
static int f2fs_migrate_blocks(struct inode *inode, block_t start_blk,
							unsigned int blkcnt)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	unsigned int blkofs;
	unsigned int blk_per_sec = BLKS_PER_SEC(sbi);
	unsigned int secidx = start_blk / blk_per_sec;
	unsigned int end_sec = secidx + blkcnt / blk_per_sec;
	int ret = 0;

	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	filemap_invalidate_lock(inode->i_mapping);

	set_inode_flag(inode, FI_ALIGNED_WRITE);

	for (; secidx < end_sec; secidx++) {
		down_write(&sbi->pin_sem);

		f2fs_lock_op(sbi);
		f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED, false);
		f2fs_unlock_op(sbi);

		set_inode_flag(inode, FI_DO_DEFRAG);

		for (blkofs = 0; blkofs < blk_per_sec; blkofs++) {
			struct page *page;
			unsigned int blkidx = secidx * blk_per_sec + blkofs;

			page = f2fs_get_lock_data_page(inode, blkidx, true);
			if (IS_ERR(page)) {
				up_write(&sbi->pin_sem);
				ret = PTR_ERR(page);
				goto done;
			}

			set_page_dirty(page);
			f2fs_put_page(page, 1);
		}

		clear_inode_flag(inode, FI_DO_DEFRAG);

		ret = filemap_fdatawrite(inode->i_mapping);

		up_write(&sbi->pin_sem);

		if (ret)
			break;
	}

done:
	clear_inode_flag(inode, FI_DO_DEFRAG);
	clear_inode_flag(inode, FI_ALIGNED_WRITE);

	filemap_invalidate_unlock(inode->i_mapping);
	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

	return ret;
}
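
/*
 * Build the swap extent list for swapon. Extents must be aligned to
 * sections; a misaligned run is rewritten in place via
 * f2fs_migrate_blocks() and re-mapped, and any hole in the file
 * rejects the swapfile outright.
 */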
static int check_swap_activate(struct swap_info_struct *sis,
				struct file *swap_file, sector_t *span)
{
	struct address_space *mapping = swap_file->f_mapping;
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	sector_t cur_lblock;
	sector_t last_lblock;
	sector_t pblock;
	sector_t lowest_pblock = -1;
	sector_t highest_pblock = 0;
	int nr_extents = 0;
	unsigned long nr_pblocks;
	unsigned int blks_per_sec = BLKS_PER_SEC(sbi);
	unsigned int sec_blks_mask = BLKS_PER_SEC(sbi) - 1;
	unsigned int not_aligned = 0;
	int ret = 0;

	/*
	 * Map all the blocks into the extent list. This code doesn't try
	 * to be very smart.
	 */
	cur_lblock = 0;
	last_lblock = bytes_to_blks(inode, i_size_read(inode));

	while (cur_lblock < last_lblock && cur_lblock < sis->max) {
		struct f2fs_map_blocks map;
retry:
		cond_resched();

		memset(&map, 0, sizeof(map));
		map.m_lblk = cur_lblock;
		map.m_len = last_lblock - cur_lblock;
		map.m_next_pgofs = NULL;
		map.m_next_extent = NULL;
		map.m_seg_type = NO_CHECK_TYPE;
		map.m_may_create = false;

		ret = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_FIEMAP);
		if (ret)
			goto out;

		/* hole */
		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
			f2fs_err(sbi, "Swapfile has holes");
			ret = -EINVAL;
			goto out;
		}

		pblock = map.m_pblk;
		nr_pblocks = map.m_len;

		if ((pblock - SM_I(sbi)->main_blkaddr) & sec_blks_mask ||
				nr_pblocks & sec_blks_mask) {
			not_aligned++;

			nr_pblocks = roundup(nr_pblocks, blks_per_sec);
			if (cur_lblock + nr_pblocks > sis->max)
				nr_pblocks -= blks_per_sec;

			if (!nr_pblocks) {
				/* this extent is last one */
				nr_pblocks = map.m_len;
				f2fs_warn(sbi, "Swapfile: last extent is not aligned to section");
				goto next;
			}

			ret = f2fs_migrate_blocks(inode, cur_lblock,
						  nr_pblocks);
			if (ret)
				goto out;
			goto retry;
		}
next:
		if (cur_lblock + nr_pblocks >= sis->max)
			nr_pblocks = sis->max - cur_lblock;

		if (cur_lblock) {	/* exclude the header page */
			if (pblock < lowest_pblock)
				lowest_pblock = pblock;
			if (pblock + nr_pblocks - 1 > highest_pblock)
				highest_pblock = pblock + nr_pblocks - 1;
		}

		/*
		 * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
		 */
		ret = add_swap_extent(sis, cur_lblock, nr_pblocks, pblock);
		if (ret < 0)
			goto out;
		nr_extents += ret;
		cur_lblock += nr_pblocks;
	}
	ret = nr_extents;
	*span = 1 + highest_pblock - lowest_pblock;
	if (cur_lblock == 0)
		cur_lblock = 1;	/* force Empty message */
	sis->max = cur_lblock;
	sis->pages = cur_lblock - 1;
	sis->highest_bit = cur_lblock - 1;
out:
	if (not_aligned)
		f2fs_warn(sbi, "Swapfile (%u) is not align to section: 1) creat(), 2) ioctl(F2FS_IOC_SET_PIN_FILE), 3) fallocate(%u * N)",
			  not_aligned, blks_per_sec * F2FS_BLKSIZE);
	return ret;
}
static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file,
				sector_t *span)
{
	struct inode *inode = file_inode(file);
	int ret;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (f2fs_readonly(F2FS_I_SB(inode)->sb))
		return -EROFS;

	if (f2fs_lfs_mode(F2FS_I_SB(inode))) {
		f2fs_err(F2FS_I_SB(inode),
			"Swapfile not supported in LFS mode");
		return -EINVAL;
	}

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	if (!f2fs_disable_compressed_file(inode))
		return -EINVAL;

	f2fs_precache_extents(inode);

	ret = check_swap_activate(sis, file, span);
	if (ret < 0)
		return ret;

	set_inode_flag(inode, FI_PIN_FILE);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return ret;
}
static void f2fs_swap_deactivate(struct file *file)
{
	struct inode *inode = file_inode(file);

	clear_inode_flag(inode, FI_PIN_FILE);
}
#else
static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file,
				sector_t *span)
{
	return -EOPNOTSUPP;
}

static void f2fs_swap_deactivate(struct file *file)
{
}
#endif
const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readahead	= f2fs_readahead,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= f2fs_write_end,
	.set_page_dirty	= f2fs_set_data_page_dirty,
	.invalidatepage	= f2fs_invalidate_page,
	.releasepage	= f2fs_release_page,
	.direct_IO	= f2fs_direct_IO,
	.bmap		= f2fs_bmap,
	.swap_activate	= f2fs_swap_activate,
	.swap_deactivate = f2fs_swap_deactivate,
#ifdef CONFIG_MIGRATION
	.migratepage	= f2fs_migrate_page,
#endif
};
void f2fs_clear_page_cache_dirty_tag(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	unsigned long flags;

	xa_lock_irqsave(&mapping->i_pages, flags);
	__xa_clear_mark(&mapping->i_pages, page_index(page),
						PAGECACHE_TAG_DIRTY);
	xa_unlock_irqrestore(&mapping->i_pages, flags);
}
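
/*
 * The post-read context cache is backed by a mempool so that reads
 * needing decryption, decompression, or verity can always make
 * progress even under memory pressure.
 */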
int __init f2fs_init_post_read_processing(void)
{
	bio_post_read_ctx_cache =
		kmem_cache_create("f2fs_bio_post_read_ctx",
				  sizeof(struct bio_post_read_ctx), 0, 0, NULL);
	if (!bio_post_read_ctx_cache)
		goto fail;
	bio_post_read_ctx_pool =
		mempool_create_slab_pool(NUM_PREALLOC_POST_READ_CTXS,
					 bio_post_read_ctx_cache);
	if (!bio_post_read_ctx_pool)
		goto fail_free_cache;
	return 0;

fail_free_cache:
	kmem_cache_destroy(bio_post_read_ctx_cache);
fail:
	return -ENOMEM;
}
void f2fs_destroy_post_read_processing(void)
{
	mempool_destroy(bio_post_read_ctx_pool);
	kmem_cache_destroy(bio_post_read_ctx_cache);
}
int f2fs_init_post_read_wq(struct f2fs_sb_info *sbi)
{
	if (!f2fs_sb_has_encrypt(sbi) &&
		!f2fs_sb_has_verity(sbi) &&
		!f2fs_sb_has_compression(sbi))
		return 0;

	sbi->post_read_wq = alloc_workqueue("f2fs_post_read_wq",
						 WQ_UNBOUND | WQ_HIGHPRI,
						 num_online_cpus());
	if (!sbi->post_read_wq)
		return -ENOMEM;
	return 0;
}
void f2fs_destroy_post_read_wq(struct f2fs_sb_info *sbi)
{
	if (sbi->post_read_wq)
		destroy_workqueue(sbi->post_read_wq);
}
int __init f2fs_init_bio_entry_cache(void)
{
	bio_entry_slab = f2fs_kmem_cache_create("f2fs_bio_entry_slab",
			sizeof(struct bio_entry));
	if (!bio_entry_slab)
		return -ENOMEM;
	return 0;
}
void f2fs_destroy_bio_entry_cache(void)
{
	kmem_cache_destroy(bio_entry_slab);
}